Dataset schema:

| column | dtype | range |
|:-|:-|:-|
| `code` | string | lengths 82 - 54.1k |
| `code_codestyle` | int64 | 0 - 699 |
| `style_context` | string | lengths 111 - 35.6k |
| `style_context_codestyle` | int64 | 0 - 699 |
| `label` | int64 | 0 - 1 |
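Each row pairs a `code` sample with a `style_context` sample, each tagged with a style-cluster id; `label` appears to mark whether the two snippets share a style cluster. As a minimal sketch, a dataset with this schema could be loaded through the `datasets` library; the repo id below is a placeholder, since the actual dataset name is not shown in this excerpt:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id standing in for the real dataset.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
```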
Row 1, `code`:

```python
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century, 1 Jan 1901 to 31 Dec 2000 (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
```

`code_codestyle`: 16
Row 1, `style_context`:

```python
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the embedded JSON blob out of a profile-page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
```

`style_context_codestyle`: 16

`label`: 1
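The `code` cell of this row is the classic counting-Sundays solution. As a sanity check, the same count can be reproduced with the standard library's `datetime`, which sidesteps the hand-rolled leap-year bookkeeping; the expected value of 171 is the well-known answer to Project Euler 19:

```python
import datetime

# Count Sundays that fell on the first of the month, 1901-2000.
sundays = sum(
    datetime.date(year, month, 1).weekday() == 6  # Monday == 0 ... Sunday == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
print(sundays)  # expected: 171
```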
Row 2, `code`:

```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
```

`code_codestyle`: 16
Row 2, `style_context`:

```python
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Build the reversible byte <-> printable-unicode mapping used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
```

`style_context_codestyle`: 16

`label`: 1
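A brief usage sketch for the configuration class in this row's `code` cell; it follows the standard `transformers` config pattern and exercises the `rope_scaling` validation, with field names taken from the code itself:

```python
# Build a small GPT-NeoX config; hidden_size must divide evenly by num_attention_heads.
config = GPTNeoXConfig(
    hidden_size=512,
    num_hidden_layers=4,
    num_attention_heads=8,
    rope_scaling={"type": "linear", "factor": 2.0},  # passes _rope_scaling_validation
)
print(config.rotary_pct, config.rope_scaling)
```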
Row 3, `code`:

```python
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # a_i is stored as little-endian digits; view it as b * 10^k + c
    # ds_b is digitsum(b), c the low-order part
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit list starting at position k, with carries."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
```

`code_codestyle`: 16
Row 3, `style_context`:

```python
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only yields a redirect."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test, job link)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        lines.append(f"| {count} | {error[:100]} | |")

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        lines.append(f"| {model} | {count} | {error[:60]} | {_count} |")

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
```

`style_context_codestyle`: 16

`label`: 1
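The `code` cell of this row computes terms of the self-describing sequence a(i+1) = a(i) + digitsum(a(i)) (Project Euler 551) with memoized digit-block jumps. A small brute-force cross-check of `solution` for modest `n`, using only the recurrence itself:

```python
def brute_force(n: int) -> int:
    # a(1) = 1; a(i+1) = a(i) + sum of digits of a(i)
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# First terms: 1, 2, 4, 8, 16, 23, 28, 38, 49, ...
assert [brute_force(i) for i in range(1, 10)] == [1, 2, 4, 8, 16, 23, 28, 38, 49]
print(brute_force(10**6))  # the jump-based solution(10**6) should agree
```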
Row 4, `code`: byte-for-byte the same `BlenderbotTokenizer` source that appears as the `style_context` of row 2 above (rows 2 and 4 pair the same two files in opposite roles).

`code_codestyle`: 16
Row 4, `style_context`: byte-for-byte the same `GPTNeoXConfig` source that appears as the `code` of row 2 above.

`style_context_codestyle`: 16

`label`: 1
Row 5, `code`:

```python
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
```

`code_codestyle`: 16
Row 5, `style_context`:

```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

`style_context_codestyle`: 16

`label`: 1
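The test file above launches helper scripts through `torchrun` and uses `accelerate.utils.patch_environment` to set environment variables only for the duration of the launch. A minimal sketch of that context manager's behaviour, independent of any GPU setup:

```python
import os

from accelerate.utils import patch_environment

# Keyword names are upper-cased and set only inside the `with` block.
with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
    print(os.environ["OMP_NUM_THREADS"], os.environ["CUDA_VISIBLE_DEVICES"])
assert "OMP_NUM_THREADS" not in os.environ  # assumes it was unset beforehand
```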
Row 6, `code`:

```python
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q, K, V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
```

`code_codestyle`: 16
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __A : List[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', 
f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 
'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def __a ( A__ : Dict , A__ : Dict , A__ : Any ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value return new_state_dict def __a ( A__ : Optional[Any] , A__ : Tuple=False ): SCREAMING_SNAKE_CASE = "" if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] def __a ( ): SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __a ( A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: SCREAMING_SNAKE_CASE = "resnet101" if "dc5" in model_name: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE = 250 else: SCREAMING_SNAKE_CASE = 91 SCREAMING_SNAKE_CASE = "huggingface/label-files" SCREAMING_SNAKE_CASE = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} # load image processor SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection" SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ ) # prepare 
image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE = encoding["pixel_values"] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval() SCREAMING_SNAKE_CASE = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." + src rename_key(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion SCREAMING_SNAKE_CASE = conditional_detr(A__ ) SCREAMING_SNAKE_CASE = model(A__ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : str = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __A : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
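# Illustrative CLI usage for the conversion script above (the script file name
# is a placeholder; the flags come from the argparse definition just before
# this comment):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50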
16
1
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into one list per attribute/column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Normalize every attribute to [0, 1]; weight 0 means lower is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each data row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
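# A minimal usage sketch for the scoring helpers above (the sample rows are
# illustrative, not from the original file): weight 1 rewards high values,
# weight 0 rewards low ones, and each row gains its combined score as a
# trailing column.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # columns: price (lower is better), mileage and year (higher is better)
    print(procentual_proximity(vehicles, [0, 1, 1]))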
16
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Explore the state-space tree: at each level pick one unused element."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
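# A small hedged variant (not part of the original file): collect the
# permutations into a list instead of printing them, so the backtracking can
# be cross-checked against itertools.permutations.
from itertools import permutations


def all_permutations(items: list[int | str]) -> list[tuple[int | str, ...]]:
    result: list[tuple[int | str, ...]] = []

    def backtrack(current: list[int | str], used: list[bool]) -> None:
        if len(current) == len(items):
            result.append(tuple(current))
            return
        for i, item in enumerate(items):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(items))
    return result


assert sorted(all_permutations([3, 1, 2])) == sorted(permutations([3, 1, 2]))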
16
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
16
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n.

    The `or` test already covers multiples of 15, so no inclusion-exclusion
    correction is needed inside the loop.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
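# A hedged cross-check (not in the original file): the same sum has a closed
# form by inclusion-exclusion over arithmetic series, avoiding the O(n) loop:
# sum(3) + sum(5) - sum(15), where sum(k) = k * m * (m + 1) / 2 and
# m = floor((n - 1) / k).
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution() == 233168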
16
1
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
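# A short worked example (illustrative, not from the original file): pigeonhole
# sort runs in O(n + range) time, so it shines when max - min is small; the
# repeat counts make duplicates safe.
assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]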
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
16
1
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore every cell's sub-squares (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memo(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in dp_array (O(rows * cols) time)."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP with only two rows of state."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Copy, not alias: `next_row` must keep the finished row's values while
        # `current_row` is overwritten during the next iteration.
        next_row = current_row[:]

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
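# Quick agreement check across the four variants (illustrative input, not from
# the original file); the space-optimized version needs the row copy above to
# stay correct.
_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
for _fn in (
    largest_square_area_in_matrix_top_down,
    largest_square_area_in_matrix_top_down_with_memo,
    largest_square_area_in_matrix_bottom_up,
    largest_square_area_in_matrix_bottom_up_space_optimization,
):
    assert _fn(3, 3, _mat) == 2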
16
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowerCamelCase ): model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() model.save_pretrained(__lowerCamelCase )
16
1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging __A : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : WhisperForConditionalGeneration , __lowerCamelCase : WhisperProcessor , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__lowerCamelCase , speech_processor=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , feature_extractor=__lowerCamelCase , ) def _snake_case ( self : Tuple , __lowerCamelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCamelCase ) def _snake_case ( self : Any ): self.enable_attention_slicing(__lowerCamelCase ) @torch.no_grad() def __call__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=16000 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor( __lowerCamelCase , return_tensors="pt" , sampling_rate=__lowerCamelCase ).input_features.to(self.device ) SCREAMING_SNAKE_CASE = self.speech_model.generate(__lowerCamelCase , max_length=480000 ) SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , normalize=__lowerCamelCase )[ 0 ] if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = 1 elif isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__lowerCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__lowerCamelCase )}." 
) # get prompt text embeddings SCREAMING_SNAKE_CASE = self.tokenizer( __lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) SCREAMING_SNAKE_CASE = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length] SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text_embeddings.shape SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , __lowerCamelCase , 1 ) SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. SCREAMING_SNAKE_CASE = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE = 42 if negative_prompt is None: SCREAMING_SNAKE_CASE = [""] * batch_size elif type(__lowerCamelCase ) is not type(__lowerCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCamelCase )} !=" f" {type(__lowerCamelCase )}." ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = [negative_prompt] elif batch_size != len(__lowerCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__lowerCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: SCREAMING_SNAKE_CASE = negative_prompt SCREAMING_SNAKE_CASE = text_input_ids.shape[-1] SCREAMING_SNAKE_CASE = self.tokenizer( __lowerCamelCase , padding="max_length" , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" , ) SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1] SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , __lowerCamelCase , 1 ) SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) SCREAMING_SNAKE_CASE = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to( self.device ) else: SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) SCREAMING_SNAKE_CASE = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__lowerCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE = {} if accepts_eta: SCREAMING_SNAKE_CASE = eta for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) # predict the noise residual SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample # perform guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents SCREAMING_SNAKE_CASE = self.vae.decode(__lowerCamelCase ).sample SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__lowerCamelCase , nsfw_content_detected=__lowerCamelCase )
16
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = layer_depths SCREAMING_SNAKE_CASE = embed_dims def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _snake_case ( self : int ): ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : Optional[Any] ): def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = 8 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowerCamelCase ) ): self.assertEqual( 
hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def _config_zero_init(__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 ) if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ): SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return configs_no_init SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self : str ): pass def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
16
1
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility
    (pass 0 for the unknown one), solve for the third via sigma = n * e * mu."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("Mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
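# A worked example (values chosen for illustration, not from the original
# file): copper-like conductivity sigma ~ 5.96e7 S/m with n ~ 8.5e28 /m^3
# gives mobility = sigma / (n * e) ~ 4.4e-3 m^2/(V*s).
_name, _value = electric_conductivity(5.96e7, 8.5e28, 0)
assert _name == "mobility"
print(f"{_name} = {_value:.3e}")  # roughly 4.4e-03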
16
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
16
1
from collections.abc import Callable


class Heap:
    """A generic max-heap that orders items by a user-supplied key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Return the index that should sit at i among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fix the ordering upward from index to the root."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fix the ordering downward from index toward the leaves."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Update the value of the given item, restoring heap order."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Remove the given item from the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Insert the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Return the top item, or None if the heap is empty."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Return and remove the top item."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
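# A brief usage sketch (not part of the original file): with the default
# identity key this behaves as a max-heap on the supplied value; passing
# key=lambda v: -v would flip it into a min-heap.
h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
assert h.get_top() == [7, 37]
h.delete_item(7)
assert h.get_top() == [5, 34]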
16
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
16
1
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __A : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any=1 ): SCREAMING_SNAKE_CASE = tokenizer SCREAMING_SNAKE_CASE = dataset SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE = n_copies def __iter__( self : Dict ): SCREAMING_SNAKE_CASE = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) SCREAMING_SNAKE_CASE = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = start_length SCREAMING_SNAKE_CASE = eof_strings SCREAMING_SNAKE_CASE = tokenizer def __call__( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) SCREAMING_SNAKE_CASE = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCamelCase ) def __a ( A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = re.split("(%s)" % "|".join(A__ ) , A__ ) # last string should be "" return "".join(string_list[:-2] ) def __a ( A__ : Any , A__ : List[str] , A__ : Dict , A__ : Dict , A__ : int , A__ : List[Any]=20 , **A__ : Tuple ): SCREAMING_SNAKE_CASE = defaultdict(A__ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A__ ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE = batch["ids"].shape[-1] SCREAMING_SNAKE_CASE = accelerator.unwrap_model(A__ ).generate( input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=A__ , **A__ ) # each task is generated batch_size times SCREAMING_SNAKE_CASE = batch["task_id"].repeat(A__ ) SCREAMING_SNAKE_CASE = accelerator.pad_across_processes( A__ , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE = generated_tasks.cpu().numpy() for task, generated_tokens in zip(A__ , A__ ): gen_token_dict[task].append(A__ ) SCREAMING_SNAKE_CASE = [[] for _ in range(A__ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE = tokenizer.decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) 
code_gens[task].append(remove_last_block(A__ ) ) return code_gens def __a ( ): # Setup configuration SCREAMING_SNAKE_CASE = HfArgumentParser(A__ ) SCREAMING_SNAKE_CASE = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE = "false" if args.num_workers is None: SCREAMING_SNAKE_CASE = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE = Accelerator() set_seed(args.seed , device_specific=A__ ) # Load model and tokenizer SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE = tokenizer.eos_token SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , A__ , A__ )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE = load_dataset("openai_humaneval" ) SCREAMING_SNAKE_CASE = load_metric("code_eval" ) SCREAMING_SNAKE_CASE = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) SCREAMING_SNAKE_CASE = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE = TokenizedDataset(A__ , human_eval["test"] , n_copies=A__ , n_tasks=A__ ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE = DataLoader(A__ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE = code_eval_metric.compute(references=[""] , predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(A__ , A__ ) SCREAMING_SNAKE_CASE = complete_code( A__ , A__ , A__ , A__ , n_tasks=A__ , batch_size=args.batch_size , **A__ , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE = [] for task in tqdm(range(A__ ) ): SCREAMING_SNAKE_CASE = human_eval["test"][task]["test"] SCREAMING_SNAKE_CASE = F"check({human_eval['test'][task]['entry_point']})" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = code_eval_metric.compute( references=A__ , predictions=A__ , num_workers=args.num_workers ) print(F"Results: {pass_at_k}" ) # Save results to json file with open(args.output_file , "w" ) as fp: json.dump(A__ , A__ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
16
import pytest __A : Optional[Any] = '__dummy_dataset1__' __A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ): SCREAMING_SNAKE_CASE = dataset_loading_script_name SCREAMING_SNAKE_CASE = tmp_path / "datasets" / script_name script_dir.mkdir(parents=A__ ) SCREAMING_SNAKE_CASE = script_dir / F"{script_name}.py" with open(A__ , "w" ) as f: f.write(A__ ) return str(A__ )
16
1
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
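# A hedged cross-check (not in the original file): for a Hermitian matrix the
# Rayleigh quotient is always bounded by the smallest and largest eigenvalues.
_a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
_v = np.array([[1], [2], [3]])
_eigs = np.linalg.eigvalsh(_a)  # eigenvalues in ascending order
_r = float(rayleigh_quotient(_a, _v))
assert _eigs[0] <= _r <= _eigs[-1]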
16
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A : str = logging.get_logger(__name__) __A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __A : Tuple = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : str = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } __A : List[str] = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } __A : Any = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } __A : str = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __A : Any = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __A : Dict = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A : Optional[int] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __A : List[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Any , ): if titles is None and texts is None: return super().__call__( __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE = titles if texts is None else texts return super().__call__( __lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles] SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts." 
) SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE = attention_mask return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ): SCREAMING_SNAKE_CASE = reader_input["input_ids"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowerCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ): SCREAMING_SNAKE_CASE = [] for start_index, start_score in enumerate(__lowerCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" ) SCREAMING_SNAKE_CASE = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}" ) if any( start_index <= prev_start_index <= 
prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowerCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = ["input_ids", "attention_mask"]
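# A minimal usage sketch for the reader tokenizer defined above, assuming the
# released class names DPRReader / DPRReaderTokenizer; the checkpoint id and
# the question/title/text strings are example values. `decode_best_spans`
# corresponds to the span-decoding method implemented above.
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# Rank passages by relevance and extract the best answer spans.
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)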
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __A : Tuple = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __A : Union[str, Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __A : List[Any] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __A : int = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __A : Any = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]), ('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __A : List[Any] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), 
('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __A : str = ( ('JH AH TH KH QH', 2_3), ('JH 9H TH KH QH', 2_2), ('JC KH JS JD JH', 2_1), ('KH KC 3S 3H 3D', 2_0), ('8C 9C 5C 3C TC', 1_9), ('JS QS 9H TS KH', 1_8), ('7C 7S KH 2H 7H', 1_7), ('3C KH 5D 5S KH', 1_6), ('QH 8H KD JH 8S', 1_5), ('2D 6D 9D TH 7D', 1_4), ) def __a ( ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = randrange(len(A__ ) ), randrange(len(A__ ) ) SCREAMING_SNAKE_CASE = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __a ( A__ : int = 100 ): return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize("hand, expected" , A__ ) def __a ( A__ : str , A__ : List[Any] ): assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize("hand, expected" , A__ ) def __a ( A__ : Optional[Any] , A__ : int ): assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values" , A__ ) def __a ( A__ : Any , A__ : List[str] , A__ : Any ): SCREAMING_SNAKE_CASE = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected" , A__ ) def __a ( A__ : Optional[int] , A__ : str ): assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize("hand, expected" , A__ ) def __a ( A__ : str , A__ : Optional[int] ): assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize("hand, other, expected" , A__ ) def __a ( A__ : Union[str, Any] , A__ : str , A__ : Optional[Any] ): assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize("hand, other, expected" , generate_random_hands() ) def __a ( A__ : List[Any] , A__ : Optional[Any] , A__ : List[Any] ): assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __a ( ): SCREAMING_SNAKE_CASE = [PokerHand(A__ ) for hand in SORTED_HANDS] SCREAMING_SNAKE_CASE = poker_hands.copy() shuffle(A__ ) SCREAMING_SNAKE_CASE = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __a ( ): # Test that five high straights are compared correctly. SCREAMING_SNAKE_CASE = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __a ( ): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. SCREAMING_SNAKE_CASE = PokerHand("2C 4S AS 3D 5C" ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __a ( ): # Problem number 54 from Project Euler # Testing from poker_hands.txt file SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(A__ ) ) SCREAMING_SNAKE_CASE = os.path.join(A__ , "poker_hands.txt" ) with open(A__ ) as file_hand: for line in file_hand: SCREAMING_SNAKE_CASE = line[:14].strip() SCREAMING_SNAKE_CASE = line[15:].strip() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PokerHand(A__ ), PokerHand(A__ ) SCREAMING_SNAKE_CASE = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    # Probability density of the normal distribution N(mu, sigma^2) at x.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
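# Quick sanity checks, a sketch: the standard normal density peaks at
# 1 / sqrt(2 * pi) and integrates to ~1 (a plain Riemann sum over [-10, 10]
# is accurate enough here, so no extra dependencies are needed).
assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12
step = 0.002
area = sum(gaussian(-10 + step * i) for i in range(10_001)) * step
assert abs(area - 1.0) < 1e-5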
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal; greedy best-first expands nodes in
        # order of this heuristic alone.
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            # goal_x/goal_y follow the Node signature (x = column, y = row).
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        # Walk parent links back to the start and reverse them into a path.
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_y, pos_x in path:
            grid[pos_y][pos_x] = 2
        for elem in grid:
            print(elem)
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 42 class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' @register_to_config def __init__( self : Union[str, Any] , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 20 , __lowerCamelCase : int = 768 , __lowerCamelCase : str=77 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : str = "silu" , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "linear" , __lowerCamelCase : Optional[str] = "prd" , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , ): super().__init__() SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = attention_head_dim SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE = additional_embeddings SCREAMING_SNAKE_CASE = time_embed_dim or inner_dim SCREAMING_SNAKE_CASE = embedding_proj_dim or embedding_dim SCREAMING_SNAKE_CASE = clip_embed_dim or embedding_dim SCREAMING_SNAKE_CASE = Timesteps(__lowerCamelCase , __lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE = TimestepEmbedding(__lowerCamelCase , __lowerCamelCase , out_dim=__lowerCamelCase , act_fn=__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) if embedding_proj_norm_type is None: SCREAMING_SNAKE_CASE = None elif embedding_proj_norm_type == "layer": SCREAMING_SNAKE_CASE = nn.LayerNorm(__lowerCamelCase ) else: raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) if encoder_hid_proj_type is None: SCREAMING_SNAKE_CASE = None elif encoder_hid_proj_type == "linear": SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) else: raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowerCamelCase ) ) if added_emb_type == "prd": SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , 1 , __lowerCamelCase ) ) elif added_emb_type is None: SCREAMING_SNAKE_CASE = None else: raise ValueError( f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) SCREAMING_SNAKE_CASE = nn.ModuleList( [ BasicTransformerBlock( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dropout=__lowerCamelCase , activation_fn="gelu" , attention_bias=__lowerCamelCase , ) for d in range(__lowerCamelCase ) ] ) if norm_in_type == "layer": SCREAMING_SNAKE_CASE = nn.LayerNorm(__lowerCamelCase ) elif norm_in_type is None: SCREAMING_SNAKE_CASE = None else: raise ValueError(f"Unsupported norm_in_type: {norm_in_type}." 
) SCREAMING_SNAKE_CASE = nn.LayerNorm(__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 ) causal_attention_mask.triu_(1 ) SCREAMING_SNAKE_CASE = causal_attention_mask[None, ...] self.register_buffer("causal_attention_mask" , __lowerCamelCase , persistent=__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = {} def fn_recursive_add_processors(__lowerCamelCase : str , __lowerCamelCase : torch.nn.Module , __lowerCamelCase : Dict[str, AttentionProcessor] ): if hasattr(__lowerCamelCase , "set_processor" ): SCREAMING_SNAKE_CASE = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , __lowerCamelCase , __lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return processors def _snake_case ( self : str , __lowerCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() ) if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(__lowerCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowerCamelCase : str , __lowerCamelCase : torch.nn.Module , __lowerCamelCase : Union[str, Any] ): if hasattr(__lowerCamelCase , "set_processor" ): if not isinstance(__lowerCamelCase , __lowerCamelCase ): module.set_processor(__lowerCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , __lowerCamelCase , __lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): self.set_attn_processor(AttnProcessor() ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[torch.Tensor, float, int] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[torch.BoolTensor] = None , __lowerCamelCase : bool = True , ): SCREAMING_SNAKE_CASE = hidden_states.shape[0] SCREAMING_SNAKE_CASE = timestep if not torch.is_tensor(__lowerCamelCase ): SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowerCamelCase ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE = timesteps * torch.ones(__lowerCamelCase , dtype=timesteps.dtype , device=timesteps.device ) SCREAMING_SNAKE_CASE = self.time_proj(__lowerCamelCase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
SCREAMING_SNAKE_CASE = timesteps_projected.to(dtype=self.dtype ) SCREAMING_SNAKE_CASE = self.time_embedding(__lowerCamelCase ) if self.embedding_proj_norm is not None: SCREAMING_SNAKE_CASE = self.embedding_proj_norm(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.embedding_proj(__lowerCamelCase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(__lowerCamelCase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" ) SCREAMING_SNAKE_CASE = self.proj_in(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.positional_embedding.to(hidden_states.dtype ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowerCamelCase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: SCREAMING_SNAKE_CASE = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: SCREAMING_SNAKE_CASE = hidden_states[:, None, :] SCREAMING_SNAKE_CASE = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: SCREAMING_SNAKE_CASE = self.prd_embedding.to(hidden_states.dtype ).expand(__lowerCamelCase , -1 , -1 ) additional_embeds.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.cat( __lowerCamelCase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens SCREAMING_SNAKE_CASE = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: SCREAMING_SNAKE_CASE = F.pad( __lowerCamelCase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) SCREAMING_SNAKE_CASE = hidden_states + positional_embeddings if attention_mask is not None: SCREAMING_SNAKE_CASE = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0 SCREAMING_SNAKE_CASE = F.pad(__lowerCamelCase , (0, self.additional_embeddings) , value=0.0 ) SCREAMING_SNAKE_CASE = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) SCREAMING_SNAKE_CASE = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: SCREAMING_SNAKE_CASE = self.norm_in(__lowerCamelCase ) for block in self.transformer_blocks: SCREAMING_SNAKE_CASE = block(__lowerCamelCase , attention_mask=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.norm_out(__lowerCamelCase ) if self.prd_embedding is not None: SCREAMING_SNAKE_CASE = hidden_states[:, -1] else: SCREAMING_SNAKE_CASE = hidden_states[:, additional_embeddings_len:] SCREAMING_SNAKE_CASE = self.proj_to_clip_embeddings(__lowerCamelCase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
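# A minimal smoke-test sketch for the prior transformer above. The public
# class name (PriorTransformer, inferred from the PriorTransformerOutput
# returned by the forward pass) and the tensor shapes are assumptions based
# on the constructor defaults (768-dim embeddings, 77 text tokens).
import torch

prior = PriorTransformer()  # assumed name; defaults: 32 heads x 64 dims, 20 layers
batch = 2
hidden_states = torch.randn(batch, 768)  # noisy image embedding being denoised
proj_embedding = torch.randn(batch, 768)  # pooled text embedding
encoder_hidden_states = torch.randn(batch, 77, 768)  # per-token text states
timestep = torch.tensor([10] * batch)
out = prior(hidden_states, timestep, proj_embedding, encoder_hidden_states)
print(out.predicted_image_embedding.shape)  # expected: torch.Size([2, 768])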
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __A : int = logging.get_logger(__name__) __A : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __A : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Tuple = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __A : Any = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) __A : Optional[int] = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) __A : Union[str, Any] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __A : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __A : str = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __A : Dict = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __A : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __A : Any = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __A : Optional[int] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __A : List[str] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) __A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __A : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_MAPPING __A : Optional[int] = auto_class_update(FlaxAutoModel) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING __A : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __A : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING __A : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __A : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : List[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __A : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence 
prediction' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __A : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __A : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
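# Usage sketch: each auto class above (names recovered from the
# auto_class_update calls) resolves a checkpoint's config to the matching
# Flax architecture; "bert-base-cased" is only an example model id.
model = FlaxAutoModel.from_pretrained("bert-base-cased")
classifier = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")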
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : Optional[Any] = logging.get_logger(__name__) def __a ( A__ : int ): SCREAMING_SNAKE_CASE = DPTConfig() if "large" in checkpoint_url: SCREAMING_SNAKE_CASE = 1024 SCREAMING_SNAKE_CASE = 4096 SCREAMING_SNAKE_CASE = 24 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = [5, 11, 17, 23] SCREAMING_SNAKE_CASE = [256, 512, 1024, 1024] SCREAMING_SNAKE_CASE = (1, 384, 384) if "ade" in checkpoint_url: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = 150 SCREAMING_SNAKE_CASE = "huggingface/label-files" SCREAMING_SNAKE_CASE = "ade20k-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="dataset" ) ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = [1, 150, 480, 480] return config, expected_shape def __a ( A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): SCREAMING_SNAKE_CASE = name.replace("pretrained.model" , "dpt.encoder" ) if "pretrained.model" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.model" , "dpt.embeddings" ) if "patch_embed" in name: SCREAMING_SNAKE_CASE = name.replace("patch_embed" , "patch_embeddings" ) if "pos_embed" in name: SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "position_embeddings" ) if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" ) if "proj" in name and "project" not in name: SCREAMING_SNAKE_CASE = name.replace("proj" , "projection" ) if "blocks" in name: SCREAMING_SNAKE_CASE = name.replace("blocks" , "layer" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" ) if "scratch.output_conv" in name: SCREAMING_SNAKE_CASE = name.replace("scratch.output_conv" , "head" ) if "scratch" in name: SCREAMING_SNAKE_CASE = name.replace("scratch" , "neck" ) if "layer1_rn" in name: SCREAMING_SNAKE_CASE = name.replace("layer1_rn" , "convs.0" ) if "layer2_rn" in name: SCREAMING_SNAKE_CASE = name.replace("layer2_rn" , "convs.1" ) if "layer3_rn" in name: SCREAMING_SNAKE_CASE = name.replace("layer3_rn" , "convs.2" ) if "layer4_rn" in name: SCREAMING_SNAKE_CASE = name.replace("layer4_rn" , "convs.3" ) if "refinenet" in name: SCREAMING_SNAKE_CASE = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 SCREAMING_SNAKE_CASE = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" ) if "out_conv" in name: SCREAMING_SNAKE_CASE = name.replace("out_conv" , "projection" ) if "resConfUnit1" in name: SCREAMING_SNAKE_CASE = name.replace("resConfUnit1" , "residual_layer1" ) if 
"resConfUnit2" in name: SCREAMING_SNAKE_CASE = name.replace("resConfUnit2" , "residual_layer2" ) if "conv1" in name: SCREAMING_SNAKE_CASE = name.replace("conv1" , "convolution1" ) if "conv2" in name: SCREAMING_SNAKE_CASE = name.replace("conv2" , "convolution2" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" ) if "pretrained.act_postprocess2.0.project.0" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" ) if "pretrained.act_postprocess3.0.project.0" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" ) if "pretrained.act_postprocess4.0.project.0" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" ) # resize blocks if "pretrained.act_postprocess1.3" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" ) if "pretrained.act_postprocess1.4" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" ) if "pretrained.act_postprocess2.3" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" ) if "pretrained.act_postprocess2.4" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" ) if "pretrained.act_postprocess3.3" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" ) if "pretrained.act_postprocess4.3" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" ) if "pretrained.act_postprocess4.4" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" ) if "pretrained" in name: SCREAMING_SNAKE_CASE = name.replace("pretrained" , "dpt" ) if "bn" in name: SCREAMING_SNAKE_CASE = name.replace("bn" , "batch_norm" ) if "head" in name: SCREAMING_SNAKE_CASE = name.replace("head" , "head.head" ) if "encoder.norm" in name: SCREAMING_SNAKE_CASE = name.replace("encoder.norm" , "layernorm" ) if "auxlayer" in name: SCREAMING_SNAKE_CASE = name.replace("auxlayer" , "auxiliary_head.head" ) return name def __a ( A__ : Dict , A__ : List[Any] ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[: config.hidden_size, :] SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def __a ( ): SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw 
) return im @torch.no_grad() def __a ( A__ : Tuple , A__ : Tuple , A__ : int , A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dpt_config(A__ ) # load original state_dict from URL SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(A__ , map_location="cpu" ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(A__ ) if "ade" in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image SCREAMING_SNAKE_CASE = 480 if "ade" in checkpoint_url else 384 SCREAMING_SNAKE_CASE = DPTImageProcessor(size=A__ ) SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(A__ , return_tensors="pt" ) # forward pass SCREAMING_SNAKE_CASE = model(**A__ ).logits if "ade" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits SCREAMING_SNAKE_CASE = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] ) if "ade" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(A__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(A__ ) if push_to_hub: print("Pushing model to hub..." ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=A__ , ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) __A : Optional[int] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
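# Example invocation, a sketch assuming this script is saved as
# convert_dpt_to_pytorch.py (the output folder is a placeholder):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large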
def kinetic_energy(mass: float, velocity: float) -> float:
    # Kinetic energy of a body: 0.5 * m * v^2; speed enters as |v|, so the
    # sign of the velocity does not matter.
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
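# Worked check, a sketch: a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J of kinetic energy, regardless of direction.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(10, -10) == 500.0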
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    # Sum of the fifth powers of the decimal digits of ``number``.
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    # Project Euler problem 30: sum all numbers that equal the sum of the
    # fifth powers of their digits. 6 * 9**5 = 354294 bounds candidates to at
    # most six digits, and the known solutions all have at least four digits,
    # so range(1000, 1000000) covers the search space.
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
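# Worked example, a sketch: 4150 is one of the numbers counted by solution(),
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150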
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A : Dict = logging.get_logger(__name__) @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : List[Any] , **__lowerCamelCase : Any ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE = deprecated_arg[3:] SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase ) logger.warning( f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name ) SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx ) SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode ) SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Name of TPU"} , ) lowerCamelCase__ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _snake_case ( self : Optional[int] ): requires_backends(self , ["tf"] ) SCREAMING_SNAKE_CASE = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE = None return tpu @cached_property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" ) return strategy @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def _snake_case ( self : Optional[Any] ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def _snake_case ( self : List[str] ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _snake_case ( self : Dict ): return self.n_gpu > 0
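# Hedged usage sketch: these arguments configure transformers' TensorFlow
# benchmark runner (class names assumed from the library; the model id,
# batch size and sequence length are example values).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()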
16
1
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy __A : List[Any] = logging.get_logger(__name__) __A : int = { 'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json', } __A : List[str] = { 'artists_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json', }, 'genres_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json', }, 'lyrics_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json', }, } __A : Optional[Any] = { 'jukebox': 5_1_2, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_LYRIC_TOKENS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=["v3", "v2", "v2"] , __lowerCamelCase : str=512 , __lowerCamelCase : str=5 , __lowerCamelCase : List[str]="<|endoftext|>" , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase , n_genres=__lowerCamelCase , version=__lowerCamelCase , max_n_lyric_tokens=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = version SCREAMING_SNAKE_CASE = max_n_lyric_tokens SCREAMING_SNAKE_CASE = n_genres with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: SCREAMING_SNAKE_CASE = oov.replace(r"\-'" , r"\-+'" ) SCREAMING_SNAKE_CASE = regex.compile(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.artists_encoder.items()} SCREAMING_SNAKE_CASE = {v: k for k, v in self.genres_encoder.items()} SCREAMING_SNAKE_CASE = {v: k for k, v in self.lyrics_encoder.items()} @property def _snake_case ( self : str ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def _snake_case ( self : Dict ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [self.artists_encoder.get(__lowerCamelCase , 0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): SCREAMING_SNAKE_CASE = [self.genres_encoder.get(__lowerCamelCase , 0 ) for genre in list_genres[genres]] SCREAMING_SNAKE_CASE = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) SCREAMING_SNAKE_CASE = [[self.lyrics_encoder.get(__lowerCamelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def _snake_case ( self : List[Any] , __lowerCamelCase : Any ): return list(__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , **__lowerCamelCase : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_for_tokenization(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": SCREAMING_SNAKE_CASE = artists[idx].lower() SCREAMING_SNAKE_CASE = [genres[idx].lower()] else: SCREAMING_SNAKE_CASE = self._normalize(artists[idx] ) + ".v2" SCREAMING_SNAKE_CASE = [ self._normalize(__lowerCamelCase ) + ".v2" for genre in genres[idx].split("_" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": SCREAMING_SNAKE_CASE = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" ) SCREAMING_SNAKE_CASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" SCREAMING_SNAKE_CASE = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) + 1 SCREAMING_SNAKE_CASE = self.vocab SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()} SCREAMING_SNAKE_CASE = "" else: SCREAMING_SNAKE_CASE = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" ) SCREAMING_SNAKE_CASE = self._run_strip_accents(__lowerCamelCase ) SCREAMING_SNAKE_CASE = lyrics.replace("\\" , "\n" ) SCREAMING_SNAKE_CASE = self.out_of_vocab.sub("" , __lowerCamelCase ), [], [] return artists, genres, lyrics def _snake_case ( self : Any , __lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = unicodedata.normalize("NFD" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for char in text: SCREAMING_SNAKE_CASE = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = ( [chr(__lowerCamelCase ) for i in range(ord("a" ) , 
ord("z" ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord("A" ) , ord("Z" ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord("0" ) , ord("9" ) + 1 )] + ["."] ) SCREAMING_SNAKE_CASE = frozenset(__lowerCamelCase ) SCREAMING_SNAKE_CASE = re.compile(r"_+" ) SCREAMING_SNAKE_CASE = "".join([c if c in accepted else "_" for c in text.lower()] ) SCREAMING_SNAKE_CASE = pattern.sub("_" , __lowerCamelCase ).strip("_" ) return text def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): return " ".join(__lowerCamelCase ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : bool = False ): # Convert to TensorType if not isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf SCREAMING_SNAKE_CASE = tf.constant SCREAMING_SNAKE_CASE = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." ) import torch SCREAMING_SNAKE_CASE = torch.tensor SCREAMING_SNAKE_CASE = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." ) import jax.numpy as jnp # noqa: F811 SCREAMING_SNAKE_CASE = jnp.array SCREAMING_SNAKE_CASE = _is_jax else: SCREAMING_SNAKE_CASE = np.asarray SCREAMING_SNAKE_CASE = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: SCREAMING_SNAKE_CASE = [inputs] if not is_tensor(__lowerCamelCase ): SCREAMING_SNAKE_CASE = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." 
) return inputs def __call__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str="" , __lowerCamelCase : Union[str, Any]="pt" ): SCREAMING_SNAKE_CASE = [0, 0, 0] SCREAMING_SNAKE_CASE = [artist] * len(self.version ) SCREAMING_SNAKE_CASE = [genres] * len(self.version ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.tokenize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._convert_token_to_id(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [-INFINITY] * len(full_tokens[-1] ) SCREAMING_SNAKE_CASE = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} ) def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def _snake_case ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = self.artists_decoder.get(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] SCREAMING_SNAKE_CASE = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
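One detail worth noting in the tokenizer above: genre id lists are right-padded with -1 up to `n_genres`. A toy illustration of that padding (hypothetical values):

n_genres = 5
genre_ids = [3, 7]
padded = genre_ids + [-1] * (n_genres - len(genre_ids))
assert padded == [3, 7, -1, -1, -1]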
16
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # forward-Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
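A short usage check for the integrator above against the exact solution of dy/dx = y, i.e. y(x) = e^x; the loose tolerance reflects the method's second-order accuracy:

import numpy as np

ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
assert abs(ys[-1] - np.e) < 1e-3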
16
1
def solution() -> int:
    """Project Euler 9: product of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
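For reference, the triplet the search above finds is (200, 375, 425), which indeed sums to 1000 and yields the product 31875000 (checked by hand):

a, b = 200, 375
c = 1000 - a - b
assert a * a + b * b == c * c  # 40000 + 140625 == 180625
assert a * b * c == 31875000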
16
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
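Example values for the proper-divisor sum above, checked by hand (the divisors of 12 below itself are 1, 2, 3, 4 and 6):

assert sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6
assert sum_of_divisors(7) == 1  # a prime's only proper divisor is 1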
16
1
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy E = 1/2 * m * v**2 (speed enters as an absolute value)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
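Example: a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J, regardless of the sign of the velocity:

assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0  # speed enters squared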
16
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __A : List[Any] = {'UserAgent': UserAgent().random} def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = script.contents[0] SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = f"https://www.instagram.com/{username}/" SCREAMING_SNAKE_CASE = self.get_json() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__lowerCamelCase ).text SCREAMING_SNAKE_CASE = BeautifulSoup(__lowerCamelCase , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ): return f"{self.__class__.__name__}('{self.username}')" def __str__( self : str ): return f"{self.fullname} ({self.username}) is {self.biography}" @property def _snake_case ( self : Optional[int] ): return self.user_data["username"] @property def _snake_case ( self : List[Any] ): return self.user_data["full_name"] @property def _snake_case ( self : List[str] ): return self.user_data["biography"] @property def _snake_case ( self : Tuple ): return self.user_data["business_email"] @property def _snake_case ( self : Optional[Any] ): return self.user_data["external_url"] @property def _snake_case ( self : int ): return self.user_data["edge_followed_by"]["count"] @property def _snake_case ( self : List[str] ): return self.user_data["edge_follow"]["count"] @property def _snake_case ( self : List[Any] ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _snake_case ( self : Any ): return self.user_data["profile_pic_url_hd"] @property def _snake_case ( self : Optional[int] ): return self.user_data["is_verified"] @property def _snake_case ( self : Dict ): return self.user_data["is_private"] def __a ( A__ : str = "github" ): import os if os.environ.get("CI" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE = InstagramUser(A__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , A__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __A : Dict = InstagramUser('github') print(instagram_user) print(f'{instagram_user.number_of_posts = }') print(f'{instagram_user.number_of_followers = }') print(f'{instagram_user.number_of_followings = }') print(f'{instagram_user.email = }') print(f'{instagram_user.website = }') print(f'{instagram_user.profile_picture_url = }') print(f'{instagram_user.is_verified = }') print(f'{instagram_user.is_private = }')
16
1
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
16
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Tuple , __lowerCamelCase : Dict ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Any , __lowerCamelCase : 
Optional[int] ): return self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : int , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
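The `bpe` loop above repeatedly merges the adjacent pair with the lowest merge rank. A standalone sketch of a single selection step with toy ranks (not the real merges file):

def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

word = ("h", "e", "l", "l", "o")
bpe_ranks = {("l", "o"): 0, ("h", "e"): 1}
bigram = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
assert bigram == ("l", "o")  # merged first, giving ("h", "e", "l", "lo")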
16
1
def perfect(number: int) -> bool:
    """A perfect number equals the sum of its proper divisors (6, 28, 496, ...)."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
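The first perfect numbers are 6, 28 and 496; the predicate above should find exactly those below 500:

assert [n for n in range(1, 500) if perfect(n)] == [6, 28, 496]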
16
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a ( A__ : str , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : List[Any] , A__ : Optional[int]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = result.headers["Location"] SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" ) with open(A__ , "wb" ) as fp: fp.write(response.content ) def __a ( A__ : List[Any] , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = None with zipfile.ZipFile(A__ ) as z: for filename in z.namelist(): if not os.path.isdir(A__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(A__ ) as f: for line in f: SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs SCREAMING_SNAKE_CASE = line[: line.index(": " )] SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed SCREAMING_SNAKE_CASE = line[len("FAILED " ) :] failed_tests.append(A__ ) elif filename == "job_name.txt": SCREAMING_SNAKE_CASE = line if len(A__ ) != len(A__ ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. 
Got {len(A__ )} for `errors` " F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" " problem." ) SCREAMING_SNAKE_CASE = None if job_name and job_links: SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ ) # A list with elements of the form (line of error, error, failed test) SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )] return result def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) ) return errors def __a ( A__ : List[str] , A__ : Tuple=None ): SCREAMING_SNAKE_CASE = Counter() counter.update([x[1] for x in logs] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {} for error, count in counts: if error_filter is None or error not in error_filter: SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : str ): SCREAMING_SNAKE_CASE = test.split("::" )[0] if test.startswith("tests/models/" ): SCREAMING_SNAKE_CASE = test.split("/" )[2] else: SCREAMING_SNAKE_CASE = None return test def __a ( A__ : List[str] , A__ : Dict=None ): SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs] SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None] SCREAMING_SNAKE_CASE = {x[2] for x in logs} SCREAMING_SNAKE_CASE = {} for test in tests: SCREAMING_SNAKE_CASE = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} SCREAMING_SNAKE_CASE = sum(error_counts.values() ) if n_errors > 0: SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : Dict ): SCREAMING_SNAKE_CASE = "| no. | error | status |" SCREAMING_SNAKE_CASE = "|-:|:-|:-|" SCREAMING_SNAKE_CASE = [header, sep] for error in reduced_by_error: SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"] SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |" lines.append(A__ ) return "\n".join(A__ ) def __a ( A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = "| model | no. 
of errors | major error | count |" SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|" SCREAMING_SNAKE_CASE = [header, sep] for model in reduced_by_model: SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0] SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(A__ ) return "\n".join(A__ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __A : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A : int = get_job_links(args.workflow_run_id, token=args.token) __A : Dict = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __A : Union[str, Any] = k.find(' / ') __A : Optional[int] = k[index + len(' / ') :] __A : Optional[int] = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A : Dict = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A : Optional[Any] = counter.most_common(3_0) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A : str = reduce_by_error(errors) __A : int = reduce_by_model(errors) __A : Any = make_github_table(reduced_by_error) __A : List[str] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
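A toy run of the counting logic used by `reduce_by_error` above, with hypothetical log tuples of the form (error line, error, failed test):

from collections import Counter

logs = [
    ("line 1", "ImportError", "tests/models/bert/test_a.py::test_x"),
    ("line 2", "ImportError", "tests/models/gpt2/test_b.py::test_y"),
    ("line 3", "OSError", "tests/models/bert/test_a.py::test_z"),
]
counter = Counter(x[1] for x in logs)
assert counter.most_common(1) == [("ImportError", 2)]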
16
1
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["vqvae"] def __init__( self : int , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Mel , __lowerCamelCase : Union[DDIMScheduler, DDPMScheduler] , ): super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase ) def _snake_case ( self : int ): return 50 if isinstance(self.scheduler , __lowerCamelCase ) else 1000 @torch.no_grad() def __call__( self : Dict , __lowerCamelCase : int = 1 , __lowerCamelCase : str = None , __lowerCamelCase : np.ndarray = None , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = None , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : Optional[int]=True , ): SCREAMING_SNAKE_CASE = steps or self.get_default_steps() self.scheduler.set_timesteps(__lowerCamelCase ) SCREAMING_SNAKE_CASE = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: SCREAMING_SNAKE_CASE = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: SCREAMING_SNAKE_CASE = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowerCamelCase , device=self.device , ) SCREAMING_SNAKE_CASE = noise SCREAMING_SNAKE_CASE = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.mel.audio_slice_to_image(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) SCREAMING_SNAKE_CASE = (input_image / 255) * 2 - 1 SCREAMING_SNAKE_CASE = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: SCREAMING_SNAKE_CASE = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample( generator=__lowerCamelCase )[0] SCREAMING_SNAKE_CASE = self.vqvae.config.scaling_factor * input_images if start_step > 0: SCREAMING_SNAKE_CASE = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] ) SCREAMING_SNAKE_CASE = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) SCREAMING_SNAKE_CASE = int(mask_start_secs * pixels_per_second ) SCREAMING_SNAKE_CASE = int(mask_end_secs * pixels_per_second ) SCREAMING_SNAKE_CASE = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowerCamelCase ): SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"] else: 
SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] if isinstance(self.scheduler , __lowerCamelCase ): SCREAMING_SNAKE_CASE = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] else: SCREAMING_SNAKE_CASE = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] if mask is not None: if mask_start > 0: SCREAMING_SNAKE_CASE = mask[:, step, :, :mask_start] if mask_end > 0: SCREAMING_SNAKE_CASE = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance SCREAMING_SNAKE_CASE = 1 / self.vqvae.config.scaling_factor * images SCREAMING_SNAKE_CASE = self.vqvae.decode(__lowerCamelCase )["sample"] SCREAMING_SNAKE_CASE = (images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() SCREAMING_SNAKE_CASE = (images * 255).round().astype("uint8" ) SCREAMING_SNAKE_CASE = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) ) SCREAMING_SNAKE_CASE = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) ) @torch.no_grad() def _snake_case ( self : int , __lowerCamelCase : List[Image.Image] , __lowerCamelCase : int = 50 ): assert isinstance(self.scheduler , __lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) SCREAMING_SNAKE_CASE = (sample / 255) * 2 - 1 SCREAMING_SNAKE_CASE = torch.Tensor(__lowerCamelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): SCREAMING_SNAKE_CASE = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps SCREAMING_SNAKE_CASE = self.scheduler.alphas_cumprod[t] SCREAMING_SNAKE_CASE = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) SCREAMING_SNAKE_CASE = 1 - alpha_prod_t SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] SCREAMING_SNAKE_CASE = (1 - alpha_prod_t_prev) ** 0.5 * model_output SCREAMING_SNAKE_CASE = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) SCREAMING_SNAKE_CASE = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _snake_case ( __lowerCamelCase : torch.Tensor , __lowerCamelCase : torch.Tensor , __lowerCamelCase : float ): SCREAMING_SNAKE_CASE = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
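The static `slerp` helper at the end of the pipeline interpolates along the great circle between two tensors. A standalone NumPy sketch of the same formula (names illustrative; verified on orthogonal unit vectors, where the midpoint is (sqrt(2)/2, sqrt(2)/2)):

import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(
        np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

x0, x1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
assert np.allclose(slerp(x0, x1, 0.5), np.sqrt(2) / 2)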
16
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
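Assuming this is the usual GPT-NeoX configuration from transformers (the `model_type = "gpt_neox"` attribute suggests `GPTNeoXConfig`), the rope-scaling validation can be exercised like so; treat the import and rope-scaling support as assumptions about the installed library version:

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0

try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # type must be one of ['linear', 'dynamic']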
16
1
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [False] * len(sequence))


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[bool],
) -> None:
    """Recursive backtracking: each level fixes one position of the permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
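The backtracking above prints rather than returns, so a collecting variant is the easiest way to cross-check it against `itertools.permutations`:

from itertools import permutations

collected = []

def collect(sequence, current, index, used):
    if index == len(sequence):
        collected.append(tuple(current))
        return
    for i in range(len(sequence)):
        if not used[i]:
            current.append(sequence[i])
            used[i] = True
            collect(sequence, current, index + 1, used)
            current.pop()
            used[i] = False

collect([3, 1, 2], [], 0, [False] * 3)
assert sorted(collected) == sorted(permutations([3, 1, 2]))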
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
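For context, a minimal sketch of what a lazy module does: defer the submodule import until one of its exported names is first accessed. This is a simplification, not the real `_LazyModule` from transformers:

import importlib
import types

class LazyModule(types.ModuleType):
    """Import submodules only when one of their exported names is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)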
16
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    scaler = MinMaxScaler()  # kept around so predictions can be inverse-transformed
    actual_data = scaler.fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    prediction = model.predict(x_test)
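Because the series was min-max scaled before training, predictions should be mapped back to the original units before scoring; a sketch reusing the `scaler`, `prediction` and `y_test` kept above:

predicted = scaler.inverse_transform(prediction.reshape(-1, 1))
expected = scaler.inverse_transform(y_test.reshape(-1, 1))
rmse = np.sqrt(np.mean((predicted - expected) ** 2))
print(f"test RMSE: {rmse:.4f}")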
16
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __A : List[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', 
f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 
'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def __a ( A__ : Dict , A__ : Dict , A__ : Any ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value return new_state_dict def __a ( A__ : Optional[Any] , A__ : Tuple=False ): SCREAMING_SNAKE_CASE = "" if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] def __a ( ): SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __a ( A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: SCREAMING_SNAKE_CASE = "resnet101" if "dc5" in model_name: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE = 250 else: SCREAMING_SNAKE_CASE = 91 SCREAMING_SNAKE_CASE = "huggingface/label-files" SCREAMING_SNAKE_CASE = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} # load image processor SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection" SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ ) # prepare 
image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE = encoding["pixel_values"] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval() SCREAMING_SNAKE_CASE = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." + src rename_key(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion SCREAMING_SNAKE_CASE = conditional_detr(A__ ) SCREAMING_SNAKE_CASE = model(A__ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : str = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __A : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
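# Editor's note: illustrative sketch, not part of the original conversion
# script. The q/k/v read-in step above (called as read_in_q_k_v) splits
# PyTorch's fused attention projection (shape [3 * hidden_size, hidden_size])
# into query/key/value blocks; the value 256 is the hidden size assumed by
# the [:256], [256:512] and [-256:] slices in that function.
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_w = in_proj_weight[:hidden_size, :]  # rows 0..255   -> query projection
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]  # rows 256..511 -> key projection
v_w = in_proj_weight[-hidden_size:, :]  # rows 512..767 -> value projection

# Stacking the three blocks back together recovers the fused matrix.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)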
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    It terminates when it reaches the end of the given sequence, printing
    one complete permutation per leaf and backtracking on the way up.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
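# Editor's note: illustrative cross-check, not part of the original record.
# The backtracking above prints each permutation instead of returning it, so
# this self-contained collector (a hypothetical helper added here only for
# the comparison) mirrors the same recursion and checks it against
# itertools.permutations.
from itertools import permutations


def collect(seq: list) -> list[tuple]:
    out: list[tuple] = []

    def rec(cur: list, used: list[bool]) -> None:
        if len(cur) == len(seq):
            out.append(tuple(cur))
            return
        for i in range(len(seq)):
            if not used[i]:
                used[i] = True
                cur.append(seq[i])
                rec(cur, used)
                cur.pop()
                used[i] = False

    rec([], [False] * len(seq))
    return out


assert sorted(collect([3, 1, 2, 4])) == sorted(permutations([3, 1, 2, 4]))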
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")

    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak the ParlAI checkpoint's weights into our Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
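# Editor's note: quick worked example on a made-up key (not from a real
# checkpoint), showing rename_state_dict_key end to end: the PATTERNS table
# rewrites "attention" -> "attn" and "q_lin" -> "q_proj", and the
# encoder-specific pass then rewrites ".attn" -> ".self_attn".
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.self_attn.q_proj.weight"
)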
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all the multiples of 3 or 5 below n.
    A multiple of 15 is already a multiple of 3, so the single
    `a % 3 == 0 or a % 5 == 0` test counts every qualifying number
    exactly once; no separate correction branch is needed.
    """
    result = 0
    a = 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
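# Editor's note: worked check, not in the original file. For n = 10 the
# multiples of 3 or 5 below 10 are 3, 5, 6 and 9, so solution(10) must be 23.
assert solution(10) == 3 + 5 + 6 + 9 == 23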
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
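# Editor's note: simplified sketch, not part of the original __init__ and not
# the transformers implementation. _LazyModule above defers the heavy
# submodule imports until a name is first looked up; the same idea can be
# approximated with a PEP 562 module-level __getattr__. The stdlib modules
# below are stand-ins chosen only so the sketch runs anywhere.
import importlib

_demo_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_demo_name_to_module = {
    name: module for module, names in _demo_import_structure.items() for name in names
}


def _demo_lazy_getattr(name: str):
    # Rename this to __getattr__ at module scope to activate lazy resolution.
    if name in _demo_name_to_module:
        module = importlib.import_module(_demo_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


assert _demo_lazy_getattr("sqrt")(9) == 3.0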
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Sort the left half and the right half individually, then merge them
    into input_list[low : high + 1].
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list, using bottom-up merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
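# Editor's note: quick property check, not in the original file. The
# bottom-up merge sort above should agree with Python's built-in sorted()
# on random input; iter_merge_sort copies its argument, so _data is intact.
if __name__ == "__main__":
    import random

    _data = [random.randint(-100, 100) for _ in range(25)]
    assert iter_merge_sort(_data) == sorted(_data)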
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    # The full BetterTransformer test suite lives in the optimum library.

    def test_transform_and_reverse(self):
        r"""
        Checks that the BetterTransformer conversion and its reversal round-trip:
        converted modules appear, generation still works, and a reversed model
        can be saved and reloaded with identical outputs.
        """
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        r"""
        save_pretrained should raise while the model is in BetterTransformer
        mode, and succeed once the conversion is reversed.
        """
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : str=13 , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=True , __lowerCamelCase : int=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : int=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : int=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : Dict ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _snake_case ( self : Any , __lowerCamelCase : 
int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = DistilBertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = DistilBertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = DistilBertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = DistilBertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = DistilBertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = self.num_choices SCREAMING_SNAKE_CASE = DistilBertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = DistilBertModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , dim=37 ) def _snake_case ( self : Optional[Any] ): self.config_tester.run_common_tests() def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCamelCase ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCamelCase ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCamelCase ) @slow def _snake_case ( self : Any ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = DistilBertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @slow @require_torch_gpu def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.jit.trace( __lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , "traced_model.pt" ) ) SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(__lowerCamelCase , "traced_model.pt" ) , map_location=__lowerCamelCase ) loaded(inputs_dict["input_ids"].to(__lowerCamelCase ) , inputs_dict["attention_mask"].to(__lowerCamelCase ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = DistilBertModel.from_pretrained("distilbert-base-uncased" ) SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = layer_depths SCREAMING_SNAKE_CASE = embed_dims def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _snake_case ( self : int ): ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : Optional[Any] ): def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = 8 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowerCamelCase ) ): self.assertEqual( 
hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def _config_zero_init(__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 ) if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ): SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return configs_no_init SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self : str ): pass def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __A : Optional[Any] = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=16 , __lowerCamelCase : int=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : List[str]=14 , __lowerCamelCase : str=10 , __lowerCamelCase : Dict=19 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=[1, 2, 3, 4, 5] , __lowerCamelCase : List[Any]=25 , __lowerCamelCase : Any=5 , ): SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = prediction_length SCREAMING_SNAKE_CASE = context_length SCREAMING_SNAKE_CASE = cardinality SCREAMING_SNAKE_CASE = num_time_features SCREAMING_SNAKE_CASE = lags_sequence SCREAMING_SNAKE_CASE = embedding_dimension SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = context_length SCREAMING_SNAKE_CASE = prediction_length + label_length SCREAMING_SNAKE_CASE = label_length SCREAMING_SNAKE_CASE = moving_average SCREAMING_SNAKE_CASE = autocorrelation_factor def _snake_case ( self : Dict ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _snake_case ( self : Dict , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = config.context_length + max(config.lags_sequence ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length] ) SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs 
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, config.prediction_length] ) SCREAMING_SNAKE_CASE = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.get_config() SCREAMING_SNAKE_CASE = self.prepare_autoformer_inputs_dict(__lowerCamelCase ) return config, inputs_dict def _snake_case ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self : int , __lowerCamelCase : int , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = AutoformerModel(config=__lowerCamelCase ).to(__lowerCamelCase ).eval() SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = outputs.encoder_last_hidden_state SCREAMING_SNAKE_CASE = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = model.get_encoder() encoder.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoformerEncoder.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.create_network_inputs(**__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) SCREAMING_SNAKE_CASE = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) SCREAMING_SNAKE_CASE = encoder(inputs_embeds=__lowerCamelCase )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) SCREAMING_SNAKE_CASE = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) SCREAMING_SNAKE_CASE = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) SCREAMING_SNAKE_CASE = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) SCREAMING_SNAKE_CASE = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = model.get_decoder() decoder.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoformerDecoder.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = decoder( trend=__lowerCamelCase , inputs_embeds=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () lowerCamelCase__ = (AutoformerForPrediction,) if is_torch_available() else () lowerCamelCase__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {} lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoformerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def _snake_case ( self : int ): self.config_tester.run_common_tests() def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_class.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertEqual(info["missing_keys"] , [] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__lowerCamelCase ) @unittest.skip(reason="Model has no tokens embeddings" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = inspect.signature(getattr(__lowerCamelCase , "forward" ) ) # The main input is the name of the argument after `self` SCREAMING_SNAKE_CASE = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , __lowerCamelCase ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", 
"future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(__lowerCamelCase )] , __lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = getattr(self.model_tester , "seq_length" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = getattr(self.model_tester , "decoder_seq_length" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = getattr(self.model_tester , "encoder_seq_length" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = getattr(self.model_tester , "d_model" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = getattr(self.model_tester , "num_attention_heads" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = d_model // num_attention_heads for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.encoder_attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(__lowerCamelCase , __lowerCamelCase ) # decoder attentions SCREAMING_SNAKE_CASE = outputs.decoder_attentions self.assertIsInstance(__lowerCamelCase , (list, tuple) ) self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions SCREAMING_SNAKE_CASE = outputs.cross_attentions self.assertIsInstance(__lowerCamelCase , (list, tuple) ) self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = 
model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + 2 , len(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _snake_case ( self : int ): super().test_retain_grad_hidden_states_attentions() def __a ( A__ : Optional[int]="train-batch.pt" ): SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A__ , repo_type="dataset" ) SCREAMING_SNAKE_CASE = torch.load(A__ , map_location=A__ ) return batch @require_torch @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = prepare_batch() with torch.no_grad(): SCREAMING_SNAKE_CASE = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] SCREAMING_SNAKE_CASE = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=__lowerCamelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = prepare_batch("val-batch.pt" ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state SCREAMING_SNAKE_CASE = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=__lowerCamelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = prepare_batch("val-batch.pt" ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) SCREAMING_SNAKE_CASE = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , 
device=__lowerCamelCase ) SCREAMING_SNAKE_CASE = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __lowerCamelCase , rtol=1e-1 ) )
16
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments with extra fields for generation-based evaluation."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize any nested GenerationConfig so the result is JSON-friendly.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
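For reference, a minimal sketch of constructing these arguments; the values are arbitrary, and `output_dir` is required by the base `TrainingArguments`:

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()["generation_num_beams"])  # 4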
16
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : Dict = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[int] = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Any = {'facebook/blenderbot_small-90M': 5_1_2} def __a ( A__ : Dict ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char SCREAMING_SNAKE_CASE = set(A__ ) return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str="__start__" , __lowerCamelCase : List[str]="__end__" , __lowerCamelCase : Any="__unk__" , __lowerCamelCase : Optional[Any]="__null__" , **__lowerCamelCase : Any , ): super().__init__(unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} @property def _snake_case ( self : Optional[int] ): return len(self.encoder ) def _snake_case ( self : Dict ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , __lowerCamelCase : str ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = re.sub("([.,!?()])" , r" \1" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = re.sub("(')" , r" \1 " , __lowerCamelCase ) SCREAMING_SNAKE_CASE = re.sub(r"\s{2,}" , " " , __lowerCamelCase ) if "\n" in token: SCREAMING_SNAKE_CASE = token.replace("\n" , " __newln__" ) SCREAMING_SNAKE_CASE = token.split(" " ) SCREAMING_SNAKE_CASE = [] for token in tokens: if not len(__lowerCamelCase ): continue SCREAMING_SNAKE_CASE = token.lower() SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: words.append(__lowerCamelCase ) continue while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = 
word.index(__lowerCamelCase , __lowerCamelCase ) new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = "@@ ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word[:-4] SCREAMING_SNAKE_CASE = word words.append(__lowerCamelCase ) return " ".join(__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = re.findall(r"\S+\n?" , __lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(" " ) ) ) return split_tokens def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = token.lower() return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : int ): return self.decoder.get(__lowerCamelCase , self.unk_token ) def _snake_case ( self : int , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ).replace("@@ " , "" ).strip() return out_string def _snake_case ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file
16
import os


def solution() -> str:
    """Sum the integers listed in num.txt and return the first ten digits of the total."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
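A quick self-contained sketch of the same idea with two sample addends inline instead of the num.txt file:

numbers = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
print(str(sum(numbers))[:10])  # first ten digits of the total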
16
1
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A : List[Any] = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = question_encoder SCREAMING_SNAKE_CASE = generator SCREAMING_SNAKE_CASE = self.question_encoder def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] ): if os.path.isfile(__lowerCamelCase ): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "question_encoder_tokenizer" ) SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "generator_tokenizer" ) self.question_encoder.save_pretrained(__lowerCamelCase ) self.generator.save_pretrained(__lowerCamelCase ) @classmethod def _snake_case ( cls : Tuple , __lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer SCREAMING_SNAKE_CASE = kwargs.pop("config" , __lowerCamelCase ) if config is None: SCREAMING_SNAKE_CASE = RagConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( __lowerCamelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( __lowerCamelCase , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=__lowerCamelCase , generator=__lowerCamelCase ) def __call__( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ): return self.current_tokenizer(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ): return self.generator.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ): return self.generator.decode(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.question_encoder def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.generator def _snake_case ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "longest" , __lowerCamelCase : str = None , __lowerCamelCase : bool = True , **__lowerCamelCase : List[str] , ): warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. 
See the documentation of your specific tokenizer for more " "details" , __lowerCamelCase , ) if max_length is None: SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length SCREAMING_SNAKE_CASE = self( __lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length SCREAMING_SNAKE_CASE = self( text_target=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = labels["input_ids"] return model_inputs
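A sketch of the non-deprecated path that the warning above points at. The names `tokenizer`, `src_texts`, and `tgt_texts` are assumptions (a RagTokenizer instance and two lists of strings); the `text_target` keyword is forwarded to the underlying tokenizer, as in the newer code path of this same file:

inputs = tokenizer(src_texts, return_tensors="pt", padding="longest", truncation=True)
targets = tokenizer(text_target=tgt_texts, return_tensors="pt", padding="longest", truncation=True)
inputs["labels"] = targets["input_ids"]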
16
import pytest

DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
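A hypothetical consumer test, not part of the original file; pytest injects the fixtures above by matching argument names:

import os


def test_dummy_script_written(dataset_loading_script_dir, dataset_loading_script_name):
    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)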
16
1
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving."""
    start = a
    end = b
    if function(a) == 0:  # one of a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # If neither endpoint is a root and both values share a sign,
        # this algorithm cannot bracket a root.
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precise to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
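A minimal usage sketch of the bisection routine above on a different function; the example function is an assumption, not part of the original file:

import math

# Hypothetical example: find the fixed point of cos, i.e. the root of cos(x) - x on [0, 2].
root = bisection(lambda x: math.cos(x) - x, 0, 2)
print(f"cos(x) = x near x = {root:.6f}")  # approximately 0.739085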
16
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A : str = logging.get_logger(__name__) __A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __A : Tuple = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : str = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } __A : List[str] = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } __A : Any = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } __A : str = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __A : Any = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __A : Dict = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A : Optional[int] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __A : List[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Any , ): if titles is None and texts is None: return super().__call__( __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE = titles if texts is None else texts return super().__call__( __lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles] SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts." 
) SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE = attention_mask return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ): SCREAMING_SNAKE_CASE = reader_input["input_ids"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowerCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ): SCREAMING_SNAKE_CASE = [] for start_index, start_score in enumerate(__lowerCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" ) SCREAMING_SNAKE_CASE = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}" ) if any( start_index <= prev_start_index <= 
prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowerCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = ["input_ids", "attention_mask"]
16
1
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : int = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __A : List[str] = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __A : Tuple = { 'vinai/phobert-base': 2_5_6, 'vinai/phobert-large': 2_5_6, } def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char SCREAMING_SNAKE_CASE = set(A__ ) return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Tuple="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , **__lowerCamelCase : Optional[Any] , ): super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = merges_file SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = 3 self.add_from_file(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split()[:-1] ) for merge in merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) 
* [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Tuple ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = "@@ ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word[:-4] SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = re.findall(r"\S+\n?" , __lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(" " ) ) ) return split_tokens def _snake_case ( self : str , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[Any] ): return self.decoder.get(__lowerCamelCase , self.unk_token ) def _snake_case ( self : int , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ).replace("@@ " , "" ).strip() return out_string def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.merges_file , __lowerCamelCase ) return out_vocab_file, out_merge_file def _snake_case ( self : int , __lowerCamelCase : Optional[int] ): if isinstance(__lowerCamelCase , __lowerCamelCase ): try: with open(__lowerCamelCase , "r" , encoding="utf-8" ) as fd: self.add_from_file(__lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return SCREAMING_SNAKE_CASE = f.readlines() for lineTmp in lines: SCREAMING_SNAKE_CASE = lineTmp.strip() SCREAMING_SNAKE_CASE = line.rfind(" " ) if idx == -1: raise 
ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) SCREAMING_SNAKE_CASE = line[:idx] SCREAMING_SNAKE_CASE = len(self.encoder )
16
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / v* v for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
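As a sanity check, not part of the original file: the Rayleigh quotient of a Hermitian matrix always lies between its smallest and largest eigenvalues. A short sketch using the functions above:

import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
r = rayleigh_quotient(a, v).item().real  # rayleigh_quotient is defined above; result is a 1x1 array
evals = np.linalg.eigvalsh(a)            # ascending order; real for Hermitian input
assert evals[0] <= r <= evals[-1]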
16
1
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @property def _snake_case ( self : List[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ort.SessionOptions() SCREAMING_SNAKE_CASE = False return options def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) SCREAMING_SNAKE_CASE = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) SCREAMING_SNAKE_CASE = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "A red cat sitting on a park bench" SCREAMING_SNAKE_CASE = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__lowerCamelCase , output_type="np" , ) SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
16
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. by heuristic cost.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the walkable in-bounds neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parents back from the goal node to produce the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
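A small usage sketch, not in the original file, running the search programmatically on the module-level grid defined above:

searcher = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))
result = searcher.search()
if result:
    print(" -> ".join(str(pos) for pos in result))  # positions as (row, col) tuples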
16
1
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
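The core round-trip the test exercises, as a minimal standalone sketch. It assumes `optimum` is installed; the checkpoint is the same tiny test model, and the output directory name is made up:

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()       # swap attention modules for BetterTransformer
model = model.reverse_bettertransformer()  # restore the vanilla modules before saving
model.save_pretrained("t5-roundtrip")      # saving is only supported after reversing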
16
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __A : int = logging.get_logger(__name__) __A : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __A : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Tuple = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __A : Any = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) __A : Optional[int] = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) __A : Union[str, Any] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __A : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __A : str = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __A : Dict = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __A : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __A : Any = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __A : Optional[int] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __A : List[str] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) __A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __A : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_MAPPING __A : Optional[int] = auto_class_update(FlaxAutoModel) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING __A : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __A : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING __A : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __A : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : List[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __A : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence 
prediction' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __A : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __A : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
16
1
import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PhobertTokenizer lowerCamelCase__ = False def _snake_case ( self : Tuple ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE = ["T@@", "i", "I", "R@@", "r", "e@@"] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = ["#version: 0.2", "l à</w>"] SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def _snake_case ( self : Dict , **__lowerCamelCase : Optional[int] ): kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = "Tôi là VinAI Research" SCREAMING_SNAKE_CASE = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>" return input_text, output_text def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE = "Tôi là VinAI Research" SCREAMING_SNAKE_CASE = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split() SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) print(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
16
def __a(mass: float, velocity: float) -> float:
    # Kinetic energy of a rigid body: E_k = 0.5 * m * v**2; abs() keeps the
    # result non-negative for negative velocities.
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
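A quick worked check of the helper above, with illustrative values: a 10 kg body at 10 m/s carries 0.5 · 10 · 10² = 500 J, regardless of the sign of the velocity.

assert __a(10, 10) == 500.0
assert __a(10, -10) == 500.0  # abs() makes the direction irrelevant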
16
1
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __A : int = logging.getLogger(__name__) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) lowerCamelCase__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = field(default=__snake_case , metadata={"help": "The input training data file (a text file)."} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "The number of processes to use for the preprocessing."} , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _snake_case ( self : Optional[int] ): if self.train_file is not None: SCREAMING_SNAKE_CASE = self.train_file.split("." 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: SCREAMING_SNAKE_CASE = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = True lowerCamelCase__ = None lowerCamelCase__ = None def __call__( self : List[str] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "label" if "label" in features[0].keys() else "labels" SCREAMING_SNAKE_CASE = [feature.pop(__lowerCamelCase ) for feature in features] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = len(features[0]["input_ids"] ) SCREAMING_SNAKE_CASE = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase )] for feature in features ] SCREAMING_SNAKE_CASE = list(chain(*__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten SCREAMING_SNAKE_CASE = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1 ) for k, v in batch.items()} # Add back labels SCREAMING_SNAKE_CASE = torch.tensor(__lowerCamelCase , dtype=torch.intaa ) return batch def __a ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , A__ , A__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(A__ ) datasets.utils.logging.set_verbosity(A__ ) transformers.utils.logging.set_verbosity(A__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: SCREAMING_SNAKE_CASE = {} if data_args.train_file is not None: SCREAMING_SNAKE_CASE = data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE = data_args.validation_file SCREAMING_SNAKE_CASE = data_args.train_file.split("." )[-1] SCREAMING_SNAKE_CASE = load_dataset( A__ , data_files=A__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. SCREAMING_SNAKE_CASE = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
SCREAMING_SNAKE_CASE = [F"ending{i}" for i in range(4 )] SCREAMING_SNAKE_CASE = "sent1" SCREAMING_SNAKE_CASE = "sent2" if data_args.max_seq_length is None: SCREAMING_SNAKE_CASE = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) SCREAMING_SNAKE_CASE = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = [[context] * 4 for context in examples[context_name]] SCREAMING_SNAKE_CASE = examples[question_header_name] SCREAMING_SNAKE_CASE = [ [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(A__ ) ] # Flatten out SCREAMING_SNAKE_CASE = list(chain(*A__ ) ) SCREAMING_SNAKE_CASE = list(chain(*A__ ) ) # Tokenize SCREAMING_SNAKE_CASE = tokenizer( A__ , A__ , truncation=A__ , max_length=A__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(A__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) SCREAMING_SNAKE_CASE = raw_datasets["train"] if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE = min(len(A__ ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE = train_dataset.select(range(A__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): SCREAMING_SNAKE_CASE = train_dataset.map( A__ , batched=A__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) SCREAMING_SNAKE_CASE = raw_datasets["validation"] if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE = min(len(A__ ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE = eval_dataset.select(range(A__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): SCREAMING_SNAKE_CASE = eval_dataset.map( A__ , batched=A__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator SCREAMING_SNAKE_CASE = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=A__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(A__ : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eval_predictions SCREAMING_SNAKE_CASE = np.argmax(A__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer SCREAMING_SNAKE_CASE = Trainer( model=A__ , args=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=A__ , data_collator=A__ , compute_metrics=A__ , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE = None if 
training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE = last_checkpoint SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=A__ ) trainer.save_model() # Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE = train_result.metrics SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ ) ) SCREAMING_SNAKE_CASE = min(A__ , len(A__ ) ) trainer.log_metrics("train" , A__ ) trainer.save_metrics("train" , A__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) SCREAMING_SNAKE_CASE = trainer.evaluate() SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A__ ) SCREAMING_SNAKE_CASE = min(A__ , len(A__ ) ) trainer.log_metrics("eval" , A__ ) trainer.save_metrics("eval" , A__ ) SCREAMING_SNAKE_CASE = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**A__ ) else: trainer.create_model_card(**A__ ) def __a ( A__ : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
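The core trick in the preprocessing function and data collator above is flattening the four candidate endings per example into one batch, tokenizing, then regrouping four-by-four. A self-contained sketch of the same flatten/un-flatten round trip, with toy strings standing in for real tokenization:

from itertools import chain

examples = {
    "sent1": ["ctx_a", "ctx_b"],
    "ending0": ["e0a", "e0b"], "ending1": ["e1a", "e1b"],
    "ending2": ["e2a", "e2b"], "ending3": ["e3a", "e3b"],
}
first_sentences = [[c] * 4 for c in examples["sent1"]]   # repeat each context per choice
second_sentences = [[examples[f"ending{i}"][j] for i in range(4)] for j in range(2)]
flat_first = list(chain(*first_sentences))               # 8 contexts
flat_second = list(chain(*second_sentences))             # 8 endings, aligned with contexts
pairs = list(zip(flat_first, flat_second))               # these would be tokenized as one flat batch
unflattened = [pairs[i : i + 4] for i in range(0, len(pairs), 4)]
assert len(unflattened) == 2 and all(len(group) == 4 for group in unflattened)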
16
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A : Dict = logging.get_logger(__name__) @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : List[Any] , **__lowerCamelCase : Any ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE = deprecated_arg[3:] SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase ) logger.warning( f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name ) SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx ) SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode ) SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Name of TPU"} , ) lowerCamelCase__ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _snake_case ( self : Optional[int] ): requires_backends(self , ["tf"] ) SCREAMING_SNAKE_CASE = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE = None return tpu @cached_property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" ) return strategy @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def _snake_case ( self : Optional[Any] ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def _snake_case ( self : List[str] ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _snake_case ( self : Dict ): return self.n_gpu > 0
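The __init__ above translates legacy "no_*" flags into their positive counterparts before handing the kwargs to the parent class. The same pattern in isolation, with a toy kwargs dict:

deprecated = ["no_inference", "no_cuda", "no_tpu", "no_speed"]
kwargs = {"no_speed": True}
for arg in deprecated:
    if arg in kwargs:
        positive = arg[3:]                     # strip the "no_" prefix
        kwargs[positive] = not kwargs.pop(arg)  # invert the meaning
print(kwargs)  # {'speed': False}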
16
1
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict=2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Any=7 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Dict=36 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Any=16 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=6 , __lowerCamelCase : int=3 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Any=None , __lowerCamelCase : Any=1000 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = text_seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = coordinate_size SCREAMING_SNAKE_CASE = shape_size SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE = text_seq_length SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = 
ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE = bbox[i, j, 3] SCREAMING_SNAKE_CASE = bbox[i, j, 1] SCREAMING_SNAKE_CASE = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE = bbox[i, j, 2] SCREAMING_SNAKE_CASE = bbox[i, j, 0] SCREAMING_SNAKE_CASE = t SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] ) SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _snake_case ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() # text + image SCREAMING_SNAKE_CASE = model(__lowerCamelCase , pixel_values=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model( __lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , token_type_ids=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only SCREAMING_SNAKE_CASE = model(pixel_values=__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _snake_case ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = self.num_labels 
SCREAMING_SNAKE_CASE = LayoutLMvaForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = config_and_inputs SCREAMING_SNAKE_CASE = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = ( {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel} if is_torch_available() else {} ) def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[Any]=False ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) if model_class in get_values(__lowerCamelCase ): SCREAMING_SNAKE_CASE = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__lowerCamelCase , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__lowerCamelCase ): SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) elif model_class in get_values(__lowerCamelCase ): SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) elif model_class in [ *get_values(__lowerCamelCase ), ]: SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) elif model_class in [ *get_values(__lowerCamelCase ), ]: SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowerCamelCase , ) return inputs_dict def _snake_case ( self : Any ): self.config_tester.run_common_tests() def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) @slow def _snake_case ( self : Optional[Any] ): for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[Any] ): return LayoutLMvaImageProcessor(apply_ocr=__lowerCamelCase ) if is_vision_available() else None @slow def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" 
).pixel_values.to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] ) SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass SCREAMING_SNAKE_CASE = model( input_ids=input_ids.to(__lowerCamelCase ) , bbox=bbox.to(__lowerCamelCase ) , pixel_values=pixel_values.to(__lowerCamelCase ) , ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
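The nested loop that legalizes random bounding boxes in prepare_config_and_inputs can also be written vectorized. A sketch enforcing the same invariant (x0 <= x1 and y0 <= y1 for every box), assuming torch is available:

import torch

bbox = torch.randint(0, 1000, (2, 5, 4))  # boxes as (x0, y0, x1, y1)
x0, y0, x1, y1 = bbox.unbind(-1)
bbox = torch.stack(
    [torch.minimum(x0, x1), torch.minimum(y0, y1),
     torch.maximum(x0, x1), torch.maximum(y0, y1)],
    dim=-1,
)
assert bool((bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all())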
16
from collections.abc import Callable

import numpy as np


def __a(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float):
    # Heun's method (modified Euler): a forward-Euler predictor followed by a
    # trapezoidal corrector that averages the slopes at both ends of the step.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
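An illustrative run of the integrator above on dy/dx = y with y(0) = 1, whose exact solution is e**x; with step 0.01 the endpoint agrees with e to roughly four decimal places:

import numpy as np

ys = __a(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1], np.e)  # ~2.71823 vs 2.71828...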
16
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


__A : Tuple = logging.get_logger(__name__)

__A : int = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}


class _SCREAMING_SNAKE_CASE(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
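An illustrative instantiation of the config class above (the relative imports only resolve inside the transformers package, so treat this strictly as a sketch): lower-case padding strategies are normalized to upper case, and unknown layer types raise.

config = _SCREAMING_SNAKE_CASE(layer_type="bottleneck", global_padding="same")
print(config.layer_type, config.global_padding)  # bottleneck SAME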
16
def __a(input_num: int) -> int:
    # Sum of the proper divisors of a positive integer (every divisor up to
    # input_num // 2; input_num itself is excluded).
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
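A quick check with a perfect number: the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum back to 28.

assert __a(28) == 1 + 2 + 4 + 7 + 14 == 28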
16
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "char" lowerCamelCase__ = "bpe" lowerCamelCase__ = "wp" __A : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["image_processor", "char_tokenizer"] lowerCamelCase__ = "ViTImageProcessor" lowerCamelCase__ = "MgpstrTokenizer" def __init__( self : List[Any] , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) SCREAMING_SNAKE_CASE = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) SCREAMING_SNAKE_CASE = tokenizer SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("gpt2" ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : int , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any=None , **__lowerCamelCase : str ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: SCREAMING_SNAKE_CASE = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is not None: SCREAMING_SNAKE_CASE = self.char_tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE = encodings["input_ids"] return inputs def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sequences SCREAMING_SNAKE_CASE = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._decode_helper(__lowerCamelCase , "char" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._decode_helper(__lowerCamelCase , "bpe" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._decode_helper(__lowerCamelCase , "wp" ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE = scores.index(max(__lowerCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = final_strs SCREAMING_SNAKE_CASE = final_scores SCREAMING_SNAKE_CASE = char_strs SCREAMING_SNAKE_CASE = bpe_strs SCREAMING_SNAKE_CASE = wp_strs return out def _snake_case ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] ): if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE = self.char_decode SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = "[s]" elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE = self.bpe_decode SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = "#" elif format == DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE = self.wp_decode SCREAMING_SNAKE_CASE = 102 SCREAMING_SNAKE_CASE = "[SEP]" else: raise ValueError(f"Format {format} is not supported." 
) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] SCREAMING_SNAKE_CASE = pred_logits.size(0 ) SCREAMING_SNAKE_CASE = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pred_logits.topk(1 , dim=-1 , largest=__lowerCamelCase , sorted=__lowerCamelCase ) SCREAMING_SNAKE_CASE = preds_index.view(-1 , __lowerCamelCase )[:, 1:] SCREAMING_SNAKE_CASE = decoder(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.nn.functional.softmax(__lowerCamelCase , dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE = preds_max_prob[:, 1:] for index in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE = preds_str[index].find(__lowerCamelCase ) SCREAMING_SNAKE_CASE = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE = pred_index.index(__lowerCamelCase ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCamelCase ) conf_scores.append(__lowerCamelCase ) return dec_strs, conf_scores def _snake_case ( self : Any , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__lowerCamelCase )] return decode_strs def _snake_case ( self : Optional[Any] , __lowerCamelCase : Optional[int] ): return self.bpe_tokenizer.batch_decode(__lowerCamelCase ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__lowerCamelCase )] return decode_strs
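A toy sketch of the best-of-three selection performed in batch_decode above: each sample keeps whichever decoding head (char / bpe / wp) scored highest.

scores = [0.91, 0.85, 0.97]               # illustrative per-head confidences
candidates = ["ticket", "ticket", "tickets"]
best = scores.index(max(scores))
print(candidates[best], scores[best])     # tickets 0.97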
16
16
1
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = filter(lambda A__ : p.requires_grad , model.parameters() ) SCREAMING_SNAKE_CASE = sum([np.prod(p.size() ) for p in model_parameters] ) return params __A : Optional[int] = logging.getLogger(__name__) def __a ( A__ : Any , A__ : str ): if metric == "rouge2": SCREAMING_SNAKE_CASE = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": SCREAMING_SNAKE_CASE = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": SCREAMING_SNAKE_CASE = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) SCREAMING_SNAKE_CASE = ModelCheckpoint( dirpath=A__ , filename=A__ , monitor=F"val_{metric}" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def __a ( A__ : int , A__ : List[str] ): return EarlyStopping( monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=A__ , verbose=A__ , ) class _SCREAMING_SNAKE_CASE ( pl.Callback ): '''simple docstring''' def _snake_case ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__lowerCamelCase ) @rank_zero_only def _snake_case ( self : Optional[Any] , __lowerCamelCase : pl.Trainer , __lowerCamelCase : pl.LightningModule , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=True ): logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" ) SCREAMING_SNAKE_CASE = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results SCREAMING_SNAKE_CASE = Path(pl_module.hparams.output_dir ) if type_path == "test": SCREAMING_SNAKE_CASE = od / "test_results.txt" SCREAMING_SNAKE_CASE = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
SCREAMING_SNAKE_CASE = od / f"{type_path}_results/{trainer.global_step:05d}.txt" SCREAMING_SNAKE_CASE = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=__lowerCamelCase ) generations_file.parent.mkdir(exist_ok=__lowerCamelCase ) with open(__lowerCamelCase , "a+" ) as writer: for key in sorted(__lowerCamelCase ): if key in ["log", "progress_bar", "preds"]: continue SCREAMING_SNAKE_CASE = metrics[key] if isinstance(__lowerCamelCase , torch.Tensor ): SCREAMING_SNAKE_CASE = val.item() SCREAMING_SNAKE_CASE = f"{key}: {val:.6f}\n" writer.write(__lowerCamelCase ) if not save_generations: return if "preds" in metrics: SCREAMING_SNAKE_CASE = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__lowerCamelCase ) @rank_zero_only def _snake_case ( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ): try: SCREAMING_SNAKE_CASE = pl_module.model.model.num_parameters() except AttributeError: SCREAMING_SNAKE_CASE = pl_module.model.num_parameters() SCREAMING_SNAKE_CASE = count_trainable_parameters(__lowerCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def _snake_case ( self : Union[str, Any] , __lowerCamelCase : pl.Trainer , __lowerCamelCase : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__lowerCamelCase , __lowerCamelCase , "test" ) @rank_zero_only def _snake_case ( self : int , __lowerCamelCase : pl.Trainer , __lowerCamelCase : Dict ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
16
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Tuple , __lowerCamelCase : Dict ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Any , __lowerCamelCase : 
Optional[int] ): return self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : int , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
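The byte-level table built by the (obfuscated) bytes_to_unicode helper above can be re-derived with nothing but the standard library: printable bytes map to themselves, and every other byte is shifted past 255 so that each one gets a visible character.

bs = (
    list(range(ord("!"), ord("~") + 1))
    + list(range(ord("¡"), ord("¬") + 1))
    + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
    if b not in bs:
        bs.append(b)
        cs.append(2**8 + n)
        n += 1
table = dict(zip(bs, [chr(c) for c in cs]))
assert len(table) == 256
print(table[32])  # 'Ġ', the space placeholder seen in GPT-2 style vocabularies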
16
1
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
16
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact from its (redirected) download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded (zipped) artifact."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """Count errors per model and keep the breakdown of error types."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
16
1
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
16
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
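For reference, a minimal sketch of the `rope_scaling` contract validated above (illustrative; it assumes the surrounding `transformers` package):

# config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
# config = GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # raises ValueError (unknown type)
# config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})  # raises ValueError (factor must be > 1.0)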
16
1
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
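Two quick illustrative checks for `format_time` above (not part of the original module):

assert format_time(75) == "01:15"      # under an hour -> mm:ss
assert format_time(3661) == "1:01:01"  # one hour or more -> h:mm:ss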
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
16
1
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [[(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -((d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -((d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -((d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -((d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (d(matrix[0][1]) * d(matrix[1][0]))

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
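A short usage sketch for the function above (illustrative, not part of the original file):

# det([[2, 5], [2, 0]]) = -10, so the inverse is (1/-10) * [[0, -5], [-2, 2]]
print(inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]]))  # [[0.0, 0.5], [0.2, -0.2]]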
16
"""Convert Conditional DETR checkpoints."""


import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )

    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our Conditional DETR structure."""

    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE: the destination keys below were lost in the dump of this file and are
                # reconstructed here; they follow the pattern of the analogous DETR conversion script.
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
16
1
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
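Outside the test harness, a typical wiring of one of these schedules looks like this (a minimal runnable sketch using the same public API, with a dummy loss standing in for a real training objective):

model = nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
for _ in range(10):
    loss = model(torch.randn(8, 50)).pow(2).mean()  # dummy loss for illustration
    loss.backward()
    optimizer.step()
    scheduler.step()  # step the schedule once per optimizer step
    optimizer.zero_grad()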
16
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first search over the state space tree: at each level, try every
    # unused element, recurse, then backtrack.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
16
1
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __A : Any = 'bart' __A : str = True @st.cache(allow_output_mutation=A__ ) def __a ( ): if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) SCREAMING_SNAKE_CASE = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) SCREAMING_SNAKE_CASE = qar_model.eval() else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (None, None) if MODEL_TYPE == "bart": SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) SCREAMING_SNAKE_CASE = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) SCREAMING_SNAKE_CASE = sas_model.eval() else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_qa_sas_model( model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=A__ ) def __a ( ): if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE = faiss.StandardGpuResources() SCREAMING_SNAKE_CASE = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"] SCREAMING_SNAKE_CASE = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , ) SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) SCREAMING_SNAKE_CASE = faiss.index_cpu_to_gpu(A__ , 1 , A__ ) wikiaab_gpu_index_flat.add(A__ ) # TODO fix for larger GPU else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (None, None) SCREAMING_SNAKE_CASE = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=A__ ) def __a ( ): SCREAMING_SNAKE_CASE = datasets.load_dataset("eli5" , name="LFQA_reddit" ) SCREAMING_SNAKE_CASE = elia["train_eli5"] SCREAMING_SNAKE_CASE = np.memmap( "eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) ) SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(A__ ) return (elia_train, eli5_train_q_index) __A , __A , __A : Dict = load_indexes() __A , __A , __A , __A : Union[str, Any] = load_models() __A , __A : Union[str, Any] = load_train_data() def __a ( A__ : Any , A__ : List[str]=10 ): SCREAMING_SNAKE_CASE = embed_questions_for_retrieval([question] , A__ , A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eli5_train_q_index.search(A__ , A__ ) SCREAMING_SNAKE_CASE = [elia_train[int(A__ )] for i in I[0]] return nn_examples def __a ( A__ : Optional[int] , A__ : Union[str, Any]="wiki40b" , A__ : List[str]="dense" , A__ : int=10 ): if source == "none": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (" <P> ".join(["" for _ in range(11 )] ).strip(), []) else: if method == "dense": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = query_qa_dense_index( A__ , A__ , A__ , A__ , A__ , A__ ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = query_es_index( A__ , A__ , index_name="english_wiki40b_snippets_100w" , n_results=A__ , ) SCREAMING_SNAKE_CASE = [ (res["article_title"], res["section_title"].strip(), res["score"], 
res["passage_text"]) for res in hit_lst ] SCREAMING_SNAKE_CASE = "question: {} context: {}".format(A__ , A__ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda A__ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda A__ : None), } ) def __a ( A__ : List[str] , A__ : Tuple , A__ : Dict , A__ : Optional[int]=64 , A__ : Tuple=256 , A__ : Dict=False , A__ : Optional[Any]=2 , A__ : Optional[int]=0.9_5 , A__ : Any=0.8 ): with torch.no_grad(): SCREAMING_SNAKE_CASE = qa_sas_generate( A__ , A__ , A__ , num_answers=1 , num_beams=A__ , min_len=A__ , max_len=A__ , do_sample=A__ , temp=A__ , top_p=A__ , top_k=A__ , max_input_length=1024 , device="cuda:0" , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __A : int = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __A : Tuple = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __A : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __A : List[Any] = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __A : Dict = st.sidebar.checkbox('Demo options') if demo_options: __A : List[Any] = st.sidebar.selectbox( '', action_list, index=3, ) __A : Optional[int] = action_list.index(action_st) __A : Optional[Any] = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __A : List[Any] = show_type == 'Show full text of passages' else: __A : Tuple = 3 __A : List[str] = True __A : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __A : str = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __A : List[str] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __A : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __A : str = 'wiki40b' __A : str = 'dense' __A : Optional[Any] = 'beam' __A : int = 2 __A : Dict = 6_4 __A : Optional[Any] = 2_5_6 __A : Dict = None __A : Optional[Any] = None __A : Optional[int] = st.sidebar.checkbox('Generation options') if generate_options: __A : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. 
You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __A : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __A : Union[str, Any] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None ) __A : Any = st.sidebar.slider( 'Maximum generation length', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None ) if sampled == "beam": __A : Optional[int] = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __A : Any = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __A : Optional[int] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __A : int = None # start main text __A : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __A : Any = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __A : List[Any] = st.text_input('Enter your question here:', '') else: __A : List[Any] = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __A , __A : str = make_support(question, source=wiki_source, method='dense', n_results=1_0) __A , __A : List[str] = make_support(question, source=wiki_source, method='sparse', n_results=1_0) __A : Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __A : Union[str, Any] = support_list[:1_0] __A : List[str] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __A , __A : List[Any] = make_support(question, source=wiki_source, method=index_type, n_results=1_0) if action in [0, 3]: __A , __A : Tuple = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __A : Optional[int] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __A : List[Any] = res[1].strip() if sec_titles == "": __A : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __A : List[Any] = sec_titles.split(' & ') __A : Union[str, Any] = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __A : Optional[int] = find_nearest_training(question) __A : List[Any] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __A : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __A : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
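# Usage note (a sketch, not part of the original app): Streamlit scripts like the
# one above are launched from a shell, e.g. `streamlit run eli5_app.py` (the file
# name is hypothetical). Every widget interaction re-runs the whole script, which
# is why the expensive `answer_question` wrapper is decorated with `@st.cache`
# using custom `hash_funcs` that skip hashing of torch tensors and the BART
# tokenizer on each rerun.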
16
def __a ( A__ : int = 1000 ):
    SCREAMING_SNAKE_CASE = 3
    SCREAMING_SNAKE_CASE = 0
    while a < A__:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
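# Cross-check sketch (hypothetical helpers, not part of the original solution):
# the same answer follows in O(1) from inclusion-exclusion, since the multiples
# of k below n sum to k * m * (m + 1) / 2 with m = (n - 1) // k.
def _sum_of_multiples_below(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2


def _closed_form(n: int = 1000) -> int:
    # multiples of 3, plus multiples of 5, minus multiples of 15 counted twice
    return (
        _sum_of_multiples_below(3, n)
        + _sum_of_multiples_below(5, n)
        - _sum_of_multiples_below(15, n)
    )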
16
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A : List[str] = logging.get_logger(__name__) __A : List[Any] = {'vocab_file': 'spiece.model'} __A : Union[str, Any] = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A : Any = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A : Tuple = 0 __A : Tuple = 1 __A : str = 2 __A : Optional[Any] = 3 __A : Union[str, Any] = 4 class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = "left" def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=False , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Optional[Any]="<sep>" , __lowerCamelCase : Dict="<pad>" , __lowerCamelCase : List[str]="<cls>" , __lowerCamelCase : str="<mask>" , __lowerCamelCase : int=["<eop>", "<eod>"] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Any , ): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = remove_space SCREAMING_SNAKE_CASE = keep_accents SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def _snake_case ( self : Dict ): return len(self.sp_model ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Optional[Any] , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self : str , __lowerCamelCase : Optional[Any] ): if self.remove_space: SCREAMING_SNAKE_CASE = " ".join(inputs.strip().split() ) else: SCREAMING_SNAKE_CASE = inputs SCREAMING_SNAKE_CASE = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not 
self.keep_accents: SCREAMING_SNAKE_CASE = unicodedata.normalize("NFKD" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: SCREAMING_SNAKE_CASE = outputs.lower() return outputs def _snake_case ( self : Optional[int] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = self.preprocess_text(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: SCREAMING_SNAKE_CASE = cur_pieces[1:] else: SCREAMING_SNAKE_CASE = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def _snake_case ( self : Tuple , __lowerCamelCase : Union[str, Any] ): return self.sp_model.PieceToId(__lowerCamelCase ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] ): return self.sp_model.IdToPiece(__lowerCamelCase ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip() return out_string def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : bool = False , __lowerCamelCase : bool = None , __lowerCamelCase : bool = True , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = kwargs.pop("use_source_tokenizer" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = [] sub_texts.append(__lowerCamelCase ) else: current_sub_text.append(__lowerCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE = self.clean_up_tokenization(__lowerCamelCase ) return clean_text else: return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1, 1] return ([0] * len(__lowerCamelCase )) + [1, 1] def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
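# Usage sketch (assuming the standard transformers entry point; not part of this
# file): the tokenizer above is normally loaded from a pretrained checkpoint
# rather than from a raw spiece.model path.
#
#     from transformers import XLNetTokenizer
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world")["input_ids"]
#
# Note that XLNet pads on the left (`padding_side = "left"` above) and appends
# its <sep> and <cls> tokens at the *end* of the sequence, unlike BERT-style
# tokenizers, which is why the special-token builder above returns
# `token_ids_a + sep + cls`.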
16
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Dict = { 'configuration_bigbird_pegasus': [ 'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST', 'BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
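# Illustration (a sketch, not part of this __init__): with _LazyModule, importing
# a name from the package only loads the heavy submodule on first access, e.g.
#
#     from transformers import BigBirdPegasusConfig   # light: configuration only
#     from transformers import BigBirdPegasusModel    # triggers the torch-backed
#                                                     # modeling module import
#
# When torch is unavailable, the modeling names are never added to
# _import_structure above, so only the configuration symbols can be imported.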
16
1
import qiskit


def __a ( A__ : int = 2 ):
    SCREAMING_SNAKE_CASE = A__
    # Using Aer's simulator
    SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend("aer_simulator" )
    # Creating a Quantum Circuit acting on the q register
    SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(A__ , A__ )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , A__ ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(A__ ) ) , list(range(A__ ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    SCREAMING_SNAKE_CASE = qiskit.execute(circuit , simulator , shots=1000 )
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    print(f'Total count for various states are: {quantum_entanglement(3)}')
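# Expected-output sketch (assuming qiskit with the Aer provider is installed):
# the circuit above prepares an n-qubit GHZ state, so all qubits collapse
# together and the counts concentrate on the all-zeros and all-ones strings.
#
#     counts = quantum_entanglement(2)   # mirrors the call in the print above
#     # With 1000 shots, roughly {"00": ~500, "11": ~500}; mixed outcomes such
#     # as "01" or "10" should not appear (up to simulator noise settings).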
16
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowerCamelCase ): model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() model.save_pretrained(__lowerCamelCase )
16
1
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MgpstrTokenizer lowerCamelCase__ = False lowerCamelCase__ = {} lowerCamelCase__ = False def _snake_case ( self : Optional[Any] ): super().setUp() # fmt: off SCREAMING_SNAKE_CASE = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) def _snake_case ( self : Dict , **__lowerCamelCase : str ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = "tester" SCREAMING_SNAKE_CASE = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) SCREAMING_SNAKE_CASE = tokenizer.encode([special_token] , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertNotEqual(len(__lowerCamelCase ) , 0 ) SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(text_a.replace(" " , "" ) , __lowerCamelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _snake_case ( self : Optional[int] ): pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _snake_case ( self : Tuple ): pass
16
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = layer_depths SCREAMING_SNAKE_CASE = embed_dims def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _snake_case ( self : int ): ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : Optional[Any] ): def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = 8 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowerCamelCase ) ): self.assertEqual( 
hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def _config_zero_init(__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 ) if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ): SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return configs_no_init SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self : str ): pass def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
16
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[str] = logging.get_logger(__name__) __A : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "sew-d" def __init__( self : Any , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[str]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : List[Any]=3072 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=256 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=("p2c", "c2p") , __lowerCamelCase : Optional[int]="layer_norm" , __lowerCamelCase : Dict="gelu_python" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-7 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : Optional[Any]="group" , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowerCamelCase : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase : List[str]=False , __lowerCamelCase : str=128 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=0.05 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Union[str, Any]=10 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Optional[int]="mean" , __lowerCamelCase : Dict=False , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[Any]=256 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Optional[Any]=2 , **__lowerCamelCase : Tuple , ): super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = feat_extract_norm SCREAMING_SNAKE_CASE = feat_extract_activation SCREAMING_SNAKE_CASE = list(__lowerCamelCase ) SCREAMING_SNAKE_CASE = list(__lowerCamelCase ) SCREAMING_SNAKE_CASE = list(__lowerCamelCase ) SCREAMING_SNAKE_CASE = conv_bias SCREAMING_SNAKE_CASE = num_conv_pos_embeddings SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE = len(self.conv_dim ) SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = squeeze_factor SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = position_buckets SCREAMING_SNAKE_CASE = share_att_key SCREAMING_SNAKE_CASE = relative_attention SCREAMING_SNAKE_CASE = norm_rel_ebd SCREAMING_SNAKE_CASE = list(__lowerCamelCase ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = feat_proj_dropout SCREAMING_SNAKE_CASE = final_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = feature_layer_norm_eps SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = vocab_size if 
( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE = apply_spec_augment SCREAMING_SNAKE_CASE = mask_time_prob SCREAMING_SNAKE_CASE = mask_time_length SCREAMING_SNAKE_CASE = mask_time_min_masks SCREAMING_SNAKE_CASE = mask_feature_prob SCREAMING_SNAKE_CASE = mask_feature_length SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss SCREAMING_SNAKE_CASE = ctc_loss_reduction SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification SCREAMING_SNAKE_CASE = use_weighted_layer_sum SCREAMING_SNAKE_CASE = classifier_proj_size @property def _snake_case ( self : Tuple ): return functools.reduce(operator.mul , self.conv_stride , 1 )
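# Worked example (hypothetical, not part of the config class): the final
# property multiplies the convolutional strides together, i.e. the overall
# audio downsampling factor. With the default strides above:
#
#     import functools, operator
#     functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)
#     # -> 320, i.e. one encoder frame per 320 waveform samples (20 ms at 16 kHz)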
16
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __A : Optional[Any] = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = super().to_dict() for k, v in d.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = v.to_dict() return d
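# Usage sketch (assuming this dataclass is transformers' Seq2SeqTrainingArguments;
# the output_dir value is hypothetical): the extra fields wire generation
# settings into the evaluation loop of a Seq2SeqTrainer.
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="./out",
#         predict_with_generate=True,     # score with generate() -> ROUGE/BLEU
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#
# The overridden to_dict above additionally flattens any nested
# GenerationConfig value into a plain dict so the arguments stay serializable.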
16
1
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __A : Union[str, Any] = logging.getLogger() def __a ( ): SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("-f" ) SCREAMING_SNAKE_CASE = parser.parse_args() return args.f class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout ) logger.addHandler(__lowerCamelCase ) def _snake_case ( self : List[Any] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): SCREAMING_SNAKE_CASE = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__lowerCamelCase , 0.666 ) @slow @require_torch_non_multi_gpu def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(__lowerCamelCase ) SCREAMING_SNAKE_CASE = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCamelCase ) SCREAMING_SNAKE_CASE = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCamelCase )
16
import os


def __a ( ):
    SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(__file__ ) , "num.txt" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(solution())
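# Fixture sketch (hypothetical file contents, not part of the solution): the
# function expects "num.txt" next to this file with one large integer per line;
# the answer is the first ten digits of their sum.
#
#     with open("num.txt", "w") as fh:
#         fh.write("37107287533902102798797998220837590246510135740250\n")
#         fh.write("46376937677490009712648124896970078050417018260538\n")
#     # solution() then returns str(<sum of the lines>)[:10]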
16
1
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
16
import pytest __A : Optional[Any] = '__dummy_dataset1__' __A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ): SCREAMING_SNAKE_CASE = dataset_loading_script_name SCREAMING_SNAKE_CASE = tmp_path / "datasets" / script_name script_dir.mkdir(parents=A__ ) SCREAMING_SNAKE_CASE = script_dir / F"{script_name}.py" with open(A__ , "w" ) as f: f.write(A__ ) return str(A__ )
16
1
from __future__ import annotations

import numpy as np


def __a ( A__ : np.ndarray ):
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.shape(A__ )
    if rows != columns:
        SCREAMING_SNAKE_CASE = (
            "'table' has to be of square shaped array but got a "
            F"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    SCREAMING_SNAKE_CASE = np.zeros((rows, columns) )
    SCREAMING_SNAKE_CASE = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            SCREAMING_SNAKE_CASE = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            SCREAMING_SNAKE_CASE = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
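# Verification sketch (the call name for the function above is hypothetical): a
# valid factorization should multiply back to the input, with `lower` unit
# lower-triangular and `upper` upper-triangular.
#
#     table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     lower, upper = lower_upper_decomposition(table)
#     assert np.allclose(lower @ upper, table)
#
# Note the routine performs no pivoting, so it raises ArithmeticError whenever a
# zero pivot appears, even if a row-permuted LU factorization exists.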
16
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A : str = logging.get_logger(__name__) __A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __A : Tuple = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : str = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } __A : List[str] = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } __A : Any = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } __A : str = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __A : Any = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __A : Dict = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A : Optional[int] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __A : List[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Any , ): if titles is None and texts is None: return super().__call__( __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE = titles if texts is None else texts return super().__call__( __lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles] SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts." 
) SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE = attention_mask return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ): SCREAMING_SNAKE_CASE = reader_input["input_ids"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowerCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ): SCREAMING_SNAKE_CASE = [] for start_index, start_score in enumerate(__lowerCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" ) SCREAMING_SNAKE_CASE = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}" ) if any( start_index <= prev_start_index <= 
prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowerCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = ["input_ids", "attention_mask"]
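The span-selection code above scores every (start, end) candidate by the sum of its start and end logits and then greedily keeps non-overlapping spans. A minimal standalone sketch of the same idea, using plain Python lists in place of the tokenizer's tensors (the function and variable names here are illustrative, not part of the library):

# Minimal sketch of DPR-style best-span selection over start/end logits.
def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=3):
    scores = []
    for start, start_score in enumerate(start_logits):
        window = end_logits[start : start + max_answer_length]
        for length, end_score in enumerate(window):
            scores.append(((start, start + length), start_score + end_score))
    # Highest combined score first.
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # Skip any candidate that overlaps an already-chosen span.
        if any(s <= start <= e or s <= end <= e or (start <= s and e <= end) for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]))  # [(1, 2), (0, 0)]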
16
1
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): debug_launcher(test_script.main ) def _snake_case ( self : Dict ): debug_launcher(test_ops.main )
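debug_launcher runs the given function across CPU processes so that distributed code paths can be exercised without GPUs, which is what the test above relies on. A small sketch of calling it directly; the num_processes keyword is assumed from the accelerate docs:

from accelerate import Accelerator, debug_launcher

def training_check():
    # Each launched process builds its own Accelerator and reports its rank.
    accelerator = Accelerator(cpu=True)
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")

if __name__ == "__main__":
    debug_launcher(training_check, num_processes=2)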
16
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
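Beyond the built-in tests(), the defining property of the Rayleigh quotient is easy to check numerically: for a Hermitian matrix it is real and bounded by the extreme eigenvalues. A self-contained check using only numpy:

import numpy as np

# Rayleigh quotient R(A, v) = (v* A v) / (v* v) for a Hermitian A.
a = np.array([[2, 2 + 1j], [2 - 1j, 3]])
v = np.array([[1], [1j]])

v_star = v.conjugate().T
r = (v_star @ a @ v) / (v_star @ v)

# For Hermitian A the quotient is real and lies between the extreme eigenvalues.
eigs = np.linalg.eigvalsh(a)
print(r.item().real, eigs.min(), eigs.max())  # 1.5 between ~0.21 and ~4.79
assert eigs.min() - 1e-9 <= r.item().real <= eigs.max() + 1e-9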
16
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env" ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings SCREAMING_SNAKE_CASE = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : Tuple ): TrainingJobAnalytics(__lowerCamelCase ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" ) @parameterized.expand([(2,)] ) def _snake_case ( self : int , __lowerCamelCase : int ): # create estimator SCREAMING_SNAKE_CASE = self.create_estimator(__lowerCamelCase ) # run training estimator.fit() # result dataframe SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping SCREAMING_SNAKE_CASE = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to 
share in PR with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase )
16
from __future__ import annotations __A : str = list[tuple[int, int]] __A : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __A : List[str] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , __lowerCamelCase : Node | None , ): SCREAMING_SNAKE_CASE = pos_x SCREAMING_SNAKE_CASE = pos_y SCREAMING_SNAKE_CASE = (pos_y, pos_x) SCREAMING_SNAKE_CASE = goal_x SCREAMING_SNAKE_CASE = goal_y SCREAMING_SNAKE_CASE = g_cost SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = self.calculate_heuristic() def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = abs(self.pos_x - self.goal_x ) SCREAMING_SNAKE_CASE = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : Union[str, Any] , __lowerCamelCase : List[Any] ): return self.f_cost < other.f_cost class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : tuple[int, int] ): SCREAMING_SNAKE_CASE = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCamelCase ) SCREAMING_SNAKE_CASE = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [self.start] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = False def _snake_case ( self : Optional[Any] ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: SCREAMING_SNAKE_CASE = True return self.retrace_path(__lowerCamelCase ) self.closed_nodes.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_successors(__lowerCamelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCamelCase ) else: # retrieve the best current path SCREAMING_SNAKE_CASE = self.open_nodes.pop(self.open_nodes.index(__lowerCamelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCamelCase ) else: self.open_nodes.append(__lowerCamelCase ) if not self.reached: return [self.start.pos] return None def _snake_case ( self : List[Any] , __lowerCamelCase : Node ): SCREAMING_SNAKE_CASE = [] for action in delta: SCREAMING_SNAKE_CASE = parent.pos_x + action[1] SCREAMING_SNAKE_CASE = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCamelCase , ) ) return successors def _snake_case ( self : str , __lowerCamelCase : Node | None ): SCREAMING_SNAKE_CASE = node SCREAMING_SNAKE_CASE = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE = current_node.parent path.reverse() return path if __name__ == "__main__": __A : Optional[Any] = (0, 0) __A : Optional[int] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') __A : List[str] = GreedyBestFirst(init, goal) __A : Tuple = greedy_bf.search() if path: for pos_x, pos_y in path: __A : Optional[Any] = 2 for elem in grid: 
print(elem)
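The search class above re-sorts its open list on every iteration; the same greedy best-first behaviour can be sketched with a binary heap, which keeps each pop at O(log n). A minimal grid version under the same Manhattan-distance heuristic (names are illustrative):

import heapq

def greedy_best_first(grid, start, goal):
    # Priority is the Manhattan distance to the goal only (no path cost),
    # which is what makes this greedy rather than A*.
    h = lambda pos: abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])
    frontier = [(h(start), start)]
    came_from = {start: None}
    while frontier:
        _, current = heapq.heappop(frontier)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = current[0] + dy, current[1] + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) \
                    and grid[ny][nx] == 0 and (ny, nx) not in came_from:
                came_from[(ny, nx)] = current
                heapq.heappush(frontier, (h((ny, nx)), (ny, nx)))
    return None

demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(greedy_best_first(demo, (0, 0), (2, 0)))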
16
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : Optional[int] = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
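The _LazyModule indirection above defers the heavy torch/TF imports until an attribute is first touched. The core mechanism is PEP 562's module-level __getattr__; a stripped-down sketch of the pattern (this is the general idea, not the actual transformers implementation):

# mypackage/__init__.py -- lazy attribute loading via PEP 562
import importlib

_import_structure = {
    "configuration_xlm": ["XLMConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule only when one of its names is first requested.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")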
16
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __A : int = logging.get_logger(__name__) __A : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __A : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Tuple = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __A : Any = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) __A : Optional[int] = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) __A : Union[str, Any] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __A : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __A : str = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __A : Dict = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __A : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __A : Any = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __A : Optional[int] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __A : List[str] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) __A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __A : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_MAPPING __A : Optional[int] = auto_class_update(FlaxAutoModel) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING __A : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __A : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING __A : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __A : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : List[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __A : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence 
prediction' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __A : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __A : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
16
1
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
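For a sense of scale, plugging numbers into the force branch above, F = ħ·c·π²·A / (240·d⁴): two 1 cm² plates one micrometre apart attract with roughly 1.3 × 10⁻⁷ N. A quick check:

from math import pi

REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # J * s
SPEED_OF_LIGHT = 3e8  # m * s^-1

area = 1e-4      # 1 cm^2 in m^2
distance = 1e-6  # 1 micrometre in m

force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")  # ~1.301e-07 N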
16
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
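A worked check of KE = ½·m·v²: a 10 kg mass moving at 10 m/s carries 500 J, and because of the abs() the sign of the velocity is irrelevant:

def kinetic_energy(mass: float, velocity: float) -> float:  # as above
    return 0.5 * mass * abs(velocity) * abs(velocity)

print(kinetic_energy(10, 10))   # 500.0 J
print(kinetic_energy(2, -9.8))  # 96.04 J; direction does not change the energy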
16
1
import pytest __A : Optional[Any] = '__dummy_dataset1__' __A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ): SCREAMING_SNAKE_CASE = dataset_loading_script_name SCREAMING_SNAKE_CASE = tmp_path / "datasets" / script_name script_dir.mkdir(parents=A__ ) SCREAMING_SNAKE_CASE = script_dir / F"{script_name}.py" with open(A__ , "w" ) as f: f.write(A__ ) return str(A__ )
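The fixtures above chain together: the tmp_path-based one consumes the two constant-returning ones to materialise a loading script on disk. The same pattern in miniature, with hypothetical names (tmp_path is pytest's built-in per-test directory fixture):

import os

import pytest

@pytest.fixture
def script_code():
    return "print('hello from a generated script')\n"

@pytest.fixture
def script_path(tmp_path, script_code):
    # tmp_path is a per-test pathlib.Path provided by pytest.
    path = tmp_path / "generated_script.py"
    path.write_text(script_code)
    return str(path)

def test_script_exists(script_path):
    assert os.path.isfile(script_path)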
16
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A : Dict = logging.get_logger(__name__) @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : List[Any] , **__lowerCamelCase : Any ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE = deprecated_arg[3:] SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase ) logger.warning( f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name ) SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx ) SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode ) SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Name of TPU"} , ) lowerCamelCase__ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _snake_case ( self : Optional[int] ): requires_backends(self , ["tf"] ) SCREAMING_SNAKE_CASE = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE = None return tpu @cached_property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" ) return strategy @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def _snake_case ( self : Optional[Any] ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def _snake_case ( self : List[str] ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _snake_case ( self : Dict ): return self.n_gpu > 0
16
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : str = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
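The update above is Heun's method (modified Euler): an Euler predictor followed by a trapezoidal corrector, which is second-order accurate. A quick scalar sanity check on dy/dx = y, whose exact value at x = 1 is e ≈ 2.71828:

import math

def heun_scalar(f, y0, x0, h, x_end):
    x, y = x0, y0
    while x < x_end - 1e-12:
        predictor = y + h * f(x, y)                        # Euler predictor
        y = y + (h / 2) * (f(x, y) + f(x + h, predictor))  # trapezoidal corrector
        x += h
    return y

approx = heun_scalar(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(approx, math.e)  # ~2.71824 vs 2.71828...; the error shrinks like h**2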
16
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __A : str = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ): warnings.warn( "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use FlavaImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
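The shim above is the standard deprecation pattern: subclass the replacement, warn once in __init__, and forward everything else. The same pattern in miniature, with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=256)  # warns, but still works
print(extractor.size)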
16
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
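Summing proper divisors with a linear scan is O(n); pairing each divisor d ≤ √n with its cofactor n // d gives the same result in O(√n). A sketch, together with the classic perfect-number check (6 and 28 equal their own divisor sums):

from math import isqrt

def sum_of_divisors_sqrt(n: int) -> int:
    # Pair each divisor d <= sqrt(n) with its cofactor n // d.
    total = 1 if n > 1 else 0
    for d in range(2, isqrt(n) + 1):
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
    return total

print(sum_of_divisors_sqrt(6))   # 6  -> perfect
print(sum_of_divisors_sqrt(28))  # 28 -> perfect
print(sum_of_divisors_sqrt(12))  # 16 -> abundant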
16
1
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __A : str = 2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __A : List[Any] = 5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __A : Union[str, Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def __a ( A__ : str , A__ : str ): SCREAMING_SNAKE_CASE = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def __a ( A__ : str , A__ : str ): SCREAMING_SNAKE_CASE = random.randint(0 , len(A__ ) - 1 ) SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:] SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __a ( A__ : str , A__ : list[str] ): SCREAMING_SNAKE_CASE = list(A__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: SCREAMING_SNAKE_CASE = random.choice(A__ ) return "".join(A__ ) def __a ( A__ : tuple[str, float] , A__ : list[tuple[str, float]] , A__ : list[str] , ): SCREAMING_SNAKE_CASE = [] # Generate more children proportionally to the fitness score. SCREAMING_SNAKE_CASE = int(parent_a[1] * 100 ) + 1 SCREAMING_SNAKE_CASE = 10 if child_n >= 10 else child_n for _ in range(A__ ): SCREAMING_SNAKE_CASE = population_score[random.randint(0 , A__ )][0] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = crossover(parent_a[0] , A__ ) # Append new string to the population list. pop.append(mutate(A__ , A__ ) ) pop.append(mutate(A__ , A__ ) ) return pop def __a ( A__ : str , A__ : list[str] , A__ : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: SCREAMING_SNAKE_CASE = F"{N_POPULATION} must be bigger than {N_SELECTED}" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. SCREAMING_SNAKE_CASE = sorted({c for c in target if c not in genes} ) if not_in_genes_list: SCREAMING_SNAKE_CASE = F"{not_in_genes_list} is not in genes list, evolution cannot converge" raise ValueError(A__ ) # Generate random starting population. SCREAMING_SNAKE_CASE = [] for _ in range(A__ ): population.append("".join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. SCREAMING_SNAKE_CASE = [evaluate(A__ , A__ ) for item in population] # Check if there is a matching evolution. 
SCREAMING_SNAKE_CASE = sorted(A__ , key=lambda x : x[1] , reverse=True ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generations. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"\nGeneration: {generation}" F"\nTotal Population:{total_population}" F"\nBest score: {population_score[0][1]}" F"\nBest string: {population_score[0][0]}" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoids regression of evolution. SCREAMING_SNAKE_CASE = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. SCREAMING_SNAKE_CASE = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] , A__ , A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": __A : Optional[Any] = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) __A : int = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) __A , __A , __A : Dict = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
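The core operators of the algorithm are easy to exercise in isolation; a small runnable sketch of fitness scoring, single-point crossover, and mutation (toy target and gene set, seeded for reproducibility):

import random

random.seed(0)

TARGET = "HELLO"
GENES = "ABCDEFGHIJKLMNOPQRSTUVWXYZ "

def fitness(candidate: str) -> float:
    # Count of positions matching the target string.
    return sum(1 for i, g in enumerate(candidate) if g == TARGET[i])

def crossover(parent_a: str, parent_b: str):
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]

def mutate(child: str) -> str:
    i = random.randrange(len(child))
    return child[:i] + random.choice(GENES) + child[i + 1 :]

a, b = crossover("HEXXX", "XXLLO")
print(a, b, fitness(a), fitness(b))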
16
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __A : List[Any] = {'UserAgent': UserAgent().random} def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = script.contents[0] SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = f"https://www.instagram.com/{username}/" SCREAMING_SNAKE_CASE = self.get_json() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__lowerCamelCase ).text SCREAMING_SNAKE_CASE = BeautifulSoup(__lowerCamelCase , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ): return f"{self.__class__.__name__}('{self.username}')" def __str__( self : str ): return f"{self.fullname} ({self.username}) is {self.biography}" @property def _snake_case ( self : Optional[int] ): return self.user_data["username"] @property def _snake_case ( self : List[Any] ): return self.user_data["full_name"] @property def _snake_case ( self : List[str] ): return self.user_data["biography"] @property def _snake_case ( self : Tuple ): return self.user_data["business_email"] @property def _snake_case ( self : Optional[Any] ): return self.user_data["external_url"] @property def _snake_case ( self : int ): return self.user_data["edge_followed_by"]["count"] @property def _snake_case ( self : List[str] ): return self.user_data["edge_follow"]["count"] @property def _snake_case ( self : List[Any] ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _snake_case ( self : Any ): return self.user_data["profile_pic_url_hd"] @property def _snake_case ( self : Optional[int] ): return self.user_data["is_verified"] @property def _snake_case ( self : Dict ): return self.user_data["is_private"] def __a ( A__ : str = "github" ): import os if os.environ.get("CI" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE = InstagramUser(A__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , A__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __A : Dict = InstagramUser('github') print(instagram_user) print(f'{instagram_user.number_of_posts = }') print(f'{instagram_user.number_of_followers = }') print(f'{instagram_user.number_of_followings = }') print(f'{instagram_user.email = }') print(f'{instagram_user.website = }') print(f'{instagram_user.profile_picture_url = }') print(f'{instagram_user.is_verified = }') print(f'{instagram_user.is_private = }')
16
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 def __init__( self : Optional[Any] , __lowerCamelCase : UNetaDModel , __lowerCamelCase : KarrasVeScheduler ): super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase ) @torch.no_grad() def __call__( self : List[str] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , **__lowerCamelCase : List[str] , ): SCREAMING_SNAKE_CASE = self.unet.config.sample_size SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size) SCREAMING_SNAKE_CASE = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) SCREAMING_SNAKE_CASE = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper SCREAMING_SNAKE_CASE = self.scheduler.schedule[t] SCREAMING_SNAKE_CASE = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.scheduler.add_noise_to_input(__lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. SCREAMING_SNAKE_CASE = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev SCREAMING_SNAKE_CASE = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. SCREAMING_SNAKE_CASE = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample SCREAMING_SNAKE_CASE = self.scheduler.step_correct( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , step_output.prev_sample , step_output["derivative"] , ) SCREAMING_SNAKE_CASE = step_output.prev_sample SCREAMING_SNAKE_CASE = (sample / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase )
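The loop above follows the stochastic sampler of Karras et al. (2022): temporarily raise the noise level to σ̂, take an Euler step down to σ_prev, then apply a second-order correction. A schematic numpy rendering of one iteration; denoise is a stand-in for the trained model, not the diffusers API:

import numpy as np

rng = np.random.default_rng(0)

def denoise(x, sigma):
    # Stand-in for the trained network's denoised estimate D(x; sigma).
    return x / (1.0 + sigma**2)

def karras_step(sample, sigma, sigma_prev, gamma=0.1):
    sigma_hat = sigma * (1 + gamma)
    # 1. temporarily raise the noise level from sigma to sigma_hat
    eps = rng.standard_normal(sample.shape)
    sample_hat = sample + np.sqrt(sigma_hat**2 - sigma**2) * eps
    # 2. Euler step from sigma_hat down to sigma_prev
    d = (sample_hat - denoise(sample_hat, sigma_hat)) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * d
    # 3. second-order correction using the derivative at sigma_prev
    if sigma_prev != 0:
        d_prime = (sample_prev - denoise(sample_prev, sigma_prev)) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (d + d_prime)
    return sample_prev

x = rng.standard_normal((4,)) * 80.0
print(karras_step(x, sigma=80.0, sigma_prev=50.0))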
16
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Tuple , __lowerCamelCase : Dict ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Any , __lowerCamelCase : 
Optional[int] ): return self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : int , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
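The bpe() method above repeatedly merges the adjacent pair with the lowest merge rank until no known merge remains. A compact standalone sketch of that merge loop over a toy merge table (toy data, not the Blenderbot vocabulary):

def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        # Pick the adjacent pair with the best (lowest) merge rank.
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}
print(bpe("hello", ranks))  # "hell o"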
16
1
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=13 , __lowerCamelCase : Tuple=30 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : int=32 , __lowerCamelCase : int=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Any=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=10 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Any=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE = num_patches + 1 def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Tuple ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _snake_case ( self : int , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = ViTMSNModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Tuple , __lowerCamelCase : List[Any] , 
__lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.type_sequence_label_size SCREAMING_SNAKE_CASE = ViTMSNForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print(f"Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = ViTMSNForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = ViTMSNModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : List[str] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = ViTMSNModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): 
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Any ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def _snake_case ( self : List[str] ): torch.manual_seed(2 ) SCREAMING_SNAKE_CASE = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
16
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a ( A__ : str , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : List[Any] , A__ : Optional[int]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = result.headers["Location"] SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" ) with open(A__ , "wb" ) as fp: fp.write(response.content ) def __a ( A__ : List[Any] , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = None with zipfile.ZipFile(A__ ) as z: for filename in z.namelist(): if not os.path.isdir(A__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(A__ ) as f: for line in f: SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs SCREAMING_SNAKE_CASE = line[: line.index(": " )] SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed SCREAMING_SNAKE_CASE = line[len("FAILED " ) :] failed_tests.append(A__ ) elif filename == "job_name.txt": SCREAMING_SNAKE_CASE = line if len(A__ ) != len(A__ ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. 
Got {len(A__ )} for `errors` " F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" " problem." ) SCREAMING_SNAKE_CASE = None if job_name and job_links: SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ ) # A list with elements of the form (line of error, error, failed test) SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )] return result def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) ) return errors def __a ( A__ : List[str] , A__ : Tuple=None ): SCREAMING_SNAKE_CASE = Counter() counter.update([x[1] for x in logs] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {} for error, count in counts: if error_filter is None or error not in error_filter: SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : str ): SCREAMING_SNAKE_CASE = test.split("::" )[0] if test.startswith("tests/models/" ): SCREAMING_SNAKE_CASE = test.split("/" )[2] else: SCREAMING_SNAKE_CASE = None return test def __a ( A__ : List[str] , A__ : Dict=None ): SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs] SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None] SCREAMING_SNAKE_CASE = {x[2] for x in logs} SCREAMING_SNAKE_CASE = {} for test in tests: SCREAMING_SNAKE_CASE = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} SCREAMING_SNAKE_CASE = sum(error_counts.values() ) if n_errors > 0: SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : Dict ): SCREAMING_SNAKE_CASE = "| no. | error | status |" SCREAMING_SNAKE_CASE = "|-:|:-|:-|" SCREAMING_SNAKE_CASE = [header, sep] for error in reduced_by_error: SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"] SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |" lines.append(A__ ) return "\n".join(A__ ) def __a ( A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = "| model | no. 
of errors | major error | count |" SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|" SCREAMING_SNAKE_CASE = [header, sep] for model in reduced_by_model: SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0] SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(A__ ) return "\n".join(A__ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __A : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A : int = get_job_links(args.workflow_run_id, token=args.token) __A : Dict = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __A : Union[str, Any] = k.find(' / ') __A : Optional[int] = k[index + len(' / ') :] __A : Optional[int] = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A : Dict = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A : Optional[Any] = counter.most_common(3_0) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A : str = reduce_by_error(errors) __A : int = reduce_by_model(errors) __A : Any = make_github_table(reduced_by_error) __A : List[str] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
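A minimal sketch of the report format the script above emits (the `reduced` dict below is hand-made sample data; the real script builds it from downloaded CI artifacts):
reduced = {
    "AssertionError: Tensors are not close!": {"count": 3, "failed_tests": []},
    "OSError: file not found": {"count": 1, "failed_tests": []},
}
lines = ["| no. | error | status |", "|-:|:-|:-|"]
for error, info in reduced.items():
    lines.append(f"| {info['count']} | {error[:100]} | |")
print("\n".join(lines))  # same table shape as reduced_by_error.txt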
16
1
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __a ( A__ : Tuple , A__ : Any , A__ : List[Any] , A__ : Any , A__ : Optional[int] ): # Load configuration defined in the metadata file with open(A__ ) as metadata_file: SCREAMING_SNAKE_CASE = json.load(A__ ) SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=A__ , **metadata["model_config"] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" ) # Load the entity vocab file SCREAMING_SNAKE_CASE = load_entity_vocab(A__ ) SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=A__ , rstrip=A__ ) SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=A__ , rstrip=A__ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(A__ ) with open(os.path.join(A__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(A__ , A__ ) SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(A__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"] SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE = F"encoder.layer.{layer_index}.attention.self." SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"] SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]] SCREAMING_SNAKE_CASE = LukeModel(config=A__ ).eval() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(A__ , strict=A__ ) if not (len(A__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F"Missing keys {', '.join(A__ )}. Expected only missing embeddings.position_ids" ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" ) # Check outputs SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(A__ , task="entity_classification" ) SCREAMING_SNAKE_CASE = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) SCREAMING_SNAKE_CASE = (39, 42) SCREAMING_SNAKE_CASE = tokenizer(A__ , entity_spans=[span] , add_prefix_space=A__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE = model(**A__ ) # Verify word hidden states if model_size == "large": SCREAMING_SNAKE_CASE = torch.Size((1, 42, 1024) ) SCREAMING_SNAKE_CASE = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base SCREAMING_SNAKE_CASE = torch.Size((1, 42, 768) ) SCREAMING_SNAKE_CASE = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": SCREAMING_SNAKE_CASE = torch.Size((1, 1, 1024) ) SCREAMING_SNAKE_CASE = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base SCREAMING_SNAKE_CASE = torch.Size((1, 1, 768) ) SCREAMING_SNAKE_CASE = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(A__ ) ) model.save_pretrained(A__ ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = {} with open(A__ , "r" , encoding="utf-8" ) as f: for index, line in enumerate(A__ ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = line.rstrip().split("\t" ) SCREAMING_SNAKE_CASE = index return entity_vocab if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __A : Optional[int] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
16
from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[Any] = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neox" def __init__( self : Optional[int] , __lowerCamelCase : List[str]=50432 , __lowerCamelCase : int=6144 , __lowerCamelCase : Optional[Any]=44 , __lowerCamelCase : Tuple=64 , __lowerCamelCase : Optional[int]=24576 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Any=0.25 , __lowerCamelCase : List[Any]=10000 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[Any]=2048 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Tuple=1e-5 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : str , ): super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = rotary_pct SCREAMING_SNAKE_CASE = rotary_emb_base SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = hidden_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = use_parallel_residual SCREAMING_SNAKE_CASE = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisible by the number of attention heads! Make sure to update them!" ) def _snake_case ( self : Union[str, Any] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
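A short usage sketch of the rope-scaling validation above, assuming the class ships upstream as GPTNeoXConfig (the row obfuscates the name):
from transformers import GPTNeoXConfig  # assumed upstream name of the class above

ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})   # factor must be a float > 1
except ValueError as err:
    print(err)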
16
1
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : List[str] = logging.get_logger(__name__) __A : Union[str, Any] = {'vocab_file': 'spiece.model'} __A : int = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } __A : Optional[int] = { 'AI-Sweden/gpt-sw3-126m': 2_0_4_8, 'AI-Sweden/gpt-sw3-350m': 2_0_4_8, 'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8, 'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8, 'AI-Sweden/gpt-sw3-20b': 2_0_4_8, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str=False , __lowerCamelCase : int=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) SCREAMING_SNAKE_CASE = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing SCREAMING_SNAKE_CASE = "<|endoftext|>" if eos_token is None else eos_token SCREAMING_SNAKE_CASE = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: SCREAMING_SNAKE_CASE = "<pad>" if pad_token is None else pad_token SCREAMING_SNAKE_CASE = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = remove_space SCREAMING_SNAKE_CASE = keep_accents SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) # Used for whitespace normalization in input texts # fmt : off SCREAMING_SNAKE_CASE = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing SCREAMING_SNAKE_CASE = re.compile( f"[{''.join(map(__lowerCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" ) def __getstate__( self : int ): SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _snake_case ( self : Tuple ): return len(self.sp_model ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub("" , __lowerCamelCase ) # Normalize whitespaces SCREAMING_SNAKE_CASE = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization SCREAMING_SNAKE_CASE = unicodedata.normalize("NFC" , __lowerCamelCase ) return text def _snake_case ( self : Dict , __lowerCamelCase : str , **__lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = self.preprocess_text(__lowerCamelCase ) return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def _snake_case ( self : List[Any] , __lowerCamelCase : str ): return self.sp_model.PieceToId(__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : int ): return self.sp_model.IdToPiece(__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : str ): return out_string def _snake_case ( self : List[Any] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = "" SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,) def _snake_case ( self : Any , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : Union[str, bool] = False ): if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = 
self.preprocess_text(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.sp_model.encode(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = [self.preprocess_text(__lowerCamelCase ) for t in text] SCREAMING_SNAKE_CASE = self.sp_model.encode(__lowerCamelCase ) if return_tensors is True or return_tensors == "pt": SCREAMING_SNAKE_CASE = torch.tensor(__lowerCamelCase ) return token_ids def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Union[int, List[int]] ): return self.sp_model.decode(__lowerCamelCase ) def _snake_case ( self : Dict , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()] SCREAMING_SNAKE_CASE = ( f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(__lowerCamelCase ) + f"{self.bos_token}Bot:" ) return self.encode(text=__lowerCamelCase )
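A hedged round-trip sketch for the tokenizer above, assuming it ships as transformers.GPTSw3Tokenizer with the encode_fast/decode_fast method names (the row obfuscates both) and that the checkpoint from the map above is reachable:
from transformers import GPTSw3Tokenizer  # assumed upstream name of the class above

tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tok.encode_fast("Träd är fina", return_tensors="pt")  # preprocess_text + sp_model.encode
print(tok.decode_fast(ids.tolist()))                        # plain sp_model.decode round-trip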
16
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : List[Any] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A : List[Any] = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["pixel_values"] def __init__( self : Union[str, Any] , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PIL.Image.BICUBIC , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : int , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = size if size is not None else {"height": 256, "width": 256} SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase ) SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 224, "width": 224} SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="crop_size" ) SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = resample SCREAMING_SNAKE_CASE = do_center_crop SCREAMING_SNAKE_CASE = crop_size SCREAMING_SNAKE_CASE = do_rescale SCREAMING_SNAKE_CASE = rescale_factor SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def _snake_case ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PIL.Image.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] , ): SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" ) return resize( __lowerCamelCase , size=(size["height"], size["width"]) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Any , ): SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}" ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[Any] , ): return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ): return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : List[Any] , ): SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE = size if size is not None else self.size SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase ) SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="crop_size" ) SCREAMING_SNAKE_CASE = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and (size is None or resample is None): raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: SCREAMING_SNAKE_CASE = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
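The preprocess pipeline above applies resize, then center crop, then rescale, then normalize; a numpy-only mirror of that order on a toy array (sizes match the 256/224 class defaults, and the IMAGENET_STANDARD mean/std values are 0.5):
import numpy as np

img = np.random.randint(0, 256, (3, 256, 256)).astype(np.float32)  # stand-in for an already-resized image
crop = img[:, 16:240, 16:240]   # 224x224 center crop
scaled = crop * (1 / 255)       # default rescale_factor
normed = (scaled - 0.5) / 0.5   # IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD
print(normed.shape)             # (3, 224, 224)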
16
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __A : List[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', 
f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 
'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def __a ( A__ : Dict , A__ : Dict , A__ : Any ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value return new_state_dict def __a ( A__ : Optional[Any] , A__ : Tuple=False ): SCREAMING_SNAKE_CASE = "" if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] def __a ( ): SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __a ( A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: SCREAMING_SNAKE_CASE = "resnet101" if "dc5" in model_name: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE = 250 else: SCREAMING_SNAKE_CASE = 91 SCREAMING_SNAKE_CASE = "huggingface/label-files" SCREAMING_SNAKE_CASE = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} # load image processor SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection" SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ ) # prepare 
image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE = encoding["pixel_values"] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval() SCREAMING_SNAKE_CASE = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." + src rename_key(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion SCREAMING_SNAKE_CASE = conditional_detr(A__ ) SCREAMING_SNAKE_CASE = model(A__ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : str = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __A : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
16
1
def __a ( A__ : int = 1000 ): SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a a += 1 return result if __name__ == "__main__": print(f'{solution() = }')
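The loop above also has an O(1) inclusion-exclusion form, handy as a cross-check for solution() (note that the original `elif a % 15 == 0` branch was unreachable, since any multiple of 15 already matches the first test):
def _tri(k: int, n: int) -> int:
    # k + 2k + ... up to the largest multiple of k below n
    m = (n - 1) // k
    return k * m * (m + 1) // 2

print(_tri(3, 1000) + _tri(5, 1000) - _tri(15, 1000))  # 233168, matches solution()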
16
from __future__ import annotations def __a ( A__ : list[int | str] ): create_state_space_tree(A__ , [] , 0 , [0 for i in range(len(A__ ) )] ) def __a ( A__ : list[int | str] , A__ : list[int | str] , A__ : int , A__ : list[int] , ): if index == len(A__ ): print(A__ ) return for i in range(len(A__ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) SCREAMING_SNAKE_CASE = True create_state_space_tree(A__ , A__ , index + 1 , A__ ) current_sequence.pop() SCREAMING_SNAKE_CASE = False __A : list[int | str] = [3, 1, 2, 4] generate_all_permutations(sequence) __A : list[int | str] = ["A", "B", "C"] generate_all_permutations(sequence_a)
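Sanity sketch: the backtracking above emits the same n! orderings as the standard library, in the same order, since unused indices are tried left to right at every depth:
from itertools import permutations

expected = [list(p) for p in permutations([3, 1, 2, 4])]
print(len(expected), expected[0], expected[-1])  # 24 [3, 1, 2, 4] [4, 2, 1, 3]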
16
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Union[str, Any] = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = ['ConvNextFeatureExtractor'] __A : Any = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
16
def __a ( A__ : int = 1000 ): SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a a += 1 return result if __name__ == "__main__": print(f'{solution() = }')
16
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : int=[10, 20, 30, 40] , __lowerCamelCase : str=[1, 1, 2, 1] , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]="relu" , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embeddings_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[str] ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _snake_case ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = TFResNetModel(config=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = TFResNetForImageClassification(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , 
unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCamelCase__ = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = TFResNetModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def _snake_case ( self : int ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : List[str] ): return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def _snake_case ( self : List[str] ): pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def _snake_case ( self : Any ): pass def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Any ): def check_hidden_states_output(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : int ): for 
model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = TFResNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Optional[int] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
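For reference, a minimal sketch of the inference path these integration tests exercise, outside the test harness. The checkpoint name microsoft/resnet-50 and the image filename are assumptions; substitute any TF ResNet checkpoint and local image:

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("cat.png")  # any RGB image on disk
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, num_labels); 1000 for ImageNet heads
predicted = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])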
16
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Dict = { 'configuration_bigbird_pegasus': [ 'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST', 'BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
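The _import_structure / _LazyModule pattern above defers the heavy torch import until an attribute is first touched. A stripped-down illustration of the same idea using PEP 562 module-level __getattr__; this is a sketch of the mechanism, not the actual _LazyModule implementation:

# mypkg/__init__.py
import importlib

_import_structure = {
    "configuration": ["MyConfig"],  # cheap to import
    "modeling": ["MyModel"],        # importing this submodule pulls in torch
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Called only when `name` is not found normally, so submodules load on demand.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")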
16
1
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = layer_depths SCREAMING_SNAKE_CASE = embed_dims def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _snake_case ( self : int ): ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : Optional[Any] ): def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = 8 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowerCamelCase ) ): self.assertEqual( 
hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def _config_zero_init(__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 ) if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ): SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return configs_no_init SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self : str ): pass def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
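The shape assertion in the hidden-states check above encodes SwiftFormer's downsampling schedule: resolution starts at image_size // 4 after the patch embedding and halves after every two stages. A quick sketch that reproduces the expected sizes (batch size 1 assumed):

image_size = 224
embed_dims = [48, 56, 112, 220]

for i in range(8):  # two hidden states per stage in this configuration
    side = (image_size // 4) // 2 ** (i // 2)
    print(i, (1, embed_dims[i // 2], side, side))
# i=0,1 -> (1, 48, 56, 56); i=2,3 -> (1, 56, 28, 28); ...; i=6,7 -> (1, 220, 7, 7)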
16
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowerCamelCase ): model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.reverse_bettertransformer() model.save_pretrained(__lowerCamelCase )
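Outside the test harness, the round trip under test looks roughly like the sketch below. to_bettertransformer requires optimum to be installed, and the fast-path model must be reverted before save_pretrained because it is not a serializable checkpoint:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "hf-internal-testing/tiny-random-t5"
model = AutoModelForSeq2SeqLM.from_pretrained(name)
tokenizer = AutoTokenizer.from_pretrained(name)
inputs = tokenizer("This is me", return_tensors="pt")

model = model.to_bettertransformer()        # swap in the fused fast path (needs optimum)
fast_ids = model.generate(**inputs)

model = model.reverse_bettertransformer()   # restore vanilla modules before saving
slow_ids = model.generate(**inputs)
assert torch.equal(fast_ids, slow_ids)      # greedy decoding is deterministic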
16
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
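The guard above is the standard optional-dependency pattern: probe for the dependency once, then expose either the real objects or dummies that fail loudly at call time. A generic sketch of the idea; the module and class names here are hypothetical:

import importlib.util


def dependency_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None


if dependency_available("torch"):
    from real_module import Pipeline  # hypothetical real import
else:
    class Pipeline:  # dummy stand-in, mirroring the dummy_*_objects modules
        def __init__(self, *args, **kwargs):
            raise ImportError("Pipeline requires `torch`; install it first.")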
16
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = layer_depths SCREAMING_SNAKE_CASE = embed_dims def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def _snake_case ( self : int ): ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : Optional[Any] ): def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = 8 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowerCamelCase ) ): self.assertEqual( 
hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def _config_zero_init(__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 ) if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ): SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return configs_no_init SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self : str ): pass def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
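The initializer check in this test works by zeroing every configured init scale, then asserting that surviving parameter means round to 0.0 (randomly initialized weights collapse toward zero) or 1.0 (e.g. LayerNorm gains); any other value flags a parameter that ignores the configured initializer. The config-scrubbing helper, written out plainly as a sketch:

import copy


def config_zero_init(config):
    cfg = copy.deepcopy(config)
    for key in cfg.__dict__:
        if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
            setattr(cfg, key, 1e-10)  # effectively zero, but avoids divide-by-zero paths
    return cfg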
16
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer __A : Optional[Any] = logging.get_logger(__name__) __A : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __A : int = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } __A : Dict = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } __A : Optional[Any] = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = BertTokenizer def __init__( self : Dict , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]="[UNK]" , __lowerCamelCase : Dict="[SEP]" , __lowerCamelCase : List[str]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : 
Optional[Any]="[MASK]" , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Dict , ): super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , normalizer_state.pop("type" ) ) SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = strip_accents SCREAMING_SNAKE_CASE = tokenize_chinese_chars SCREAMING_SNAKE_CASE = normalizer_class(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = do_lower_case def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any]=None ): SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): SCREAMING_SNAKE_CASE = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase )
16
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __A : Optional[Any] = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = super().to_dict() for k, v in d.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = v.to_dict() return d
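The only non-trivial piece here is to_dict serializing nested config objects (the GenerationConfig instance) instead of leaving live objects in the dict. A self-contained sketch of that pattern; the class names are hypothetical stand-ins:

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class GenerationOptions:  # hypothetical stand-in for GenerationConfig
    num_beams: int = 4

    def to_dict(self):
        return {"num_beams": self.num_beams}


@dataclass
class MyTrainingArguments:
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    generation_config: Optional[GenerationOptions] = None

    def to_dict(self):
        # Flatten nested config objects so the result is JSON-serializable.
        return {k: (v.to_dict() if hasattr(v, "to_dict") else v) for k, v in self.__dict__.items()}


args = MyTrainingArguments(generation_config=GenerationOptions())
print(args.to_dict())  # {'sortish_sampler': False, 'generation_config': {'num_beams': 4}}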
16
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input __A : List[Any] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine' def __a ( ): SCREAMING_SNAKE_CASE = _ask_options( "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: SCREAMING_SNAKE_CASE = get_sagemaker_input() else: SCREAMING_SNAKE_CASE = get_cluster_input() return config def __a ( A__ : int=None ): if subparsers is not None: SCREAMING_SNAKE_CASE = subparsers.add_parser("config" , description=A__ ) else: SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate config command" , description=A__ ) parser.add_argument( "--config_file" , default=A__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def __a ( A__ : int ): SCREAMING_SNAKE_CASE = get_user_input() if args.config_file is not None: SCREAMING_SNAKE_CASE = args.config_file else: if not os.path.isdir(A__ ): os.makedirs(A__ ) SCREAMING_SNAKE_CASE = default_yaml_config_file if config_file.endswith(".json" ): config.to_json_file(A__ ) else: config.to_yaml_file(A__ ) print(F"accelerate configuration saved at {config_file}" ) def __a ( ): SCREAMING_SNAKE_CASE = config_command_parser() SCREAMING_SNAKE_CASE = parser.parse_args() config_command(A__ ) if __name__ == "__main__": main()
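The CLI wiring above is plain argparse: the command registers itself as a subparser and binds its handler through set_defaults(func=...), so the top-level entry point can dispatch with args.func(args). A minimal standalone sketch of that dispatch pattern:

import argparse


def build_parser():
    parser = argparse.ArgumentParser("accelerate")
    subparsers = parser.add_subparsers()
    config = subparsers.add_parser("config", description="Create a default_config.yaml")
    config.add_argument("--config_file", default=None, help="Where to store the config file.")
    config.set_defaults(func=lambda args: print(f"would write {args.config_file or 'default_config.yaml'}"))
    return parser


args = build_parser().parse_args(["config", "--config_file", "my.yaml"])
args.func(args)  # prints: would write my.yaml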
16
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in ``num.txt``."""
    # Anchor the data file relative to this script.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
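Python's arbitrary-precision integers make the exact sum trivial. As an aside, the classic shortcut truncates each addend: with 100 numbers, summing the first ~15 digits of each leaves at most 2 carry digits plus slack before truncation error could reach the leading ten digits, so it determines them in practice. The exact sum above remains the safe default; this helper is only a sketch of the trick:

def first_ten_digits_approx(lines, prefix=15):
    # Truncate each digit string to its first `prefix` digits before summing.
    return str(sum(int(line[:prefix]) for line in lines))[:10]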
16
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A : Optional[int] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PegasusTokenizer lowerCamelCase__ = PegasusTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def _snake_case ( self : Optional[Any] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE = PegasusTokenizer(__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _snake_case ( self : Any ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def _snake_case ( self : str , **__lowerCamelCase : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : Any ): return ("This is a test", "This is a test") def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = "</s>" SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(__lowerCamelCase ) , 1103 ) def _snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0] SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word SCREAMING_SNAKE_CASE = "<mask_1> To ensure a <mask_2> flow of bank resolutions." 
SCREAMING_SNAKE_CASE = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=__lowerCamelCase ).input_ids[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 SCREAMING_SNAKE_CASE = "To ensure a smooth flow of bank resolutions." SCREAMING_SNAKE_CASE = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=__lowerCamelCase ).input_ids[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = ["This is going to be way too long." * 150, "short example"] SCREAMING_SNAKE_CASE = ["not super long but more than 5 tokens", "tiny"] SCREAMING_SNAKE_CASE = self._large_tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) SCREAMING_SNAKE_CASE = self._large_tokenizer( text_target=__lowerCamelCase , max_length=5 , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__lowerCamelCase ) == 2 # input_ids, attention_mask. 
@slow def _snake_case ( self : str ): # fmt: off SCREAMING_SNAKE_CASE = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PegasusTokenizer lowerCamelCase__ = PegasusTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def _snake_case ( self : int ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE = PegasusTokenizer(__lowerCamelCase , offset=0 , mask_token_sent=__lowerCamelCase , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _snake_case ( self : str ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def _snake_case ( self : int , **__lowerCamelCase : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): return ("This is a test", "This is a test") def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase 
).input_ids[0] SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @require_torch def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ["This is going to be way too long." * 1000, "short example"] SCREAMING_SNAKE_CASE = ["not super long but more than 5 tokens", "tiny"] SCREAMING_SNAKE_CASE = self._large_tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) SCREAMING_SNAKE_CASE = self._large_tokenizer( text_target=__lowerCamelCase , max_length=5 , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__lowerCamelCase ) == 2 # input_ids, attention_mask. def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) SCREAMING_SNAKE_CASE = self._large_tokenizer(__lowerCamelCase ).input_ids self.assertListEqual( __lowerCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
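The magic numbers asserted in the large-tokenizer test follow from Pegasus reserving a block of ids ahead of the SentencePiece vocabulary: pad and eos sit at 0 and 1, the mask and unk_2..unk_102 tokens fill the reserved range, and raw SentencePiece ids are shifted up by offset. A sketch of that arithmetic, mirroring the assertions:

offset = 103
pad_token_id, eos_token_id = 0, 1
unk_token_id = offset + 2            # the sp model's <unk> sits at sp-id 2
sp_vocab_size = 96_000               # implied by the asserted total below
vocab_size = sp_vocab_size + offset
assert (unk_token_id, vocab_size) == (105, 96_103)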
16
import pytest __A : Optional[Any] = '__dummy_dataset1__' __A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ): SCREAMING_SNAKE_CASE = dataset_loading_script_name SCREAMING_SNAKE_CASE = tmp_path / "datasets" / script_name script_dir.mkdir(parents=A__ ) SCREAMING_SNAKE_CASE = script_dir / F"{script_name}.py" with open(A__ , "w" ) as f: f.write(A__ ) return str(A__ )
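The fixture chain above is standard pytest composition: the string fixtures feed a tmp_path-based fixture that materializes the loading script as <tmp>/datasets/<name>/<name>.py and returns its path. A condensed sketch of the same shape:

import pytest


@pytest.fixture
def dataset_script(tmp_path):
    script_dir = tmp_path / "datasets" / "__dummy_dataset1__"
    script_dir.mkdir(parents=True)
    script_path = script_dir / "__dummy_dataset1__.py"
    script_path.write_text("# loading script body goes here\n")
    return str(script_path)


def test_script_path(dataset_script):
    assert dataset_script.endswith("__dummy_dataset1__.py")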
16
1
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DanceDiffusionPipeline lowerCamelCase__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } lowerCamelCase__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : str ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCamelCase , use_timestep_embedding=__lowerCamelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) SCREAMING_SNAKE_CASE = IPNDMScheduler() SCREAMING_SNAKE_CASE = { "unet": unet, "scheduler": scheduler, } return components def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=0 ): if str(__lowerCamelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = output.audios SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) SCREAMING_SNAKE_CASE = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def _snake_case ( self : str ): return super().test_save_load_local() @skip_mps def _snake_case ( self : List[str] ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def _snake_case ( self : Optional[Any] ): return super().test_save_load_optional_components() @skip_mps def _snake_case ( self : Any ): return super().test_attention_slicing_forward_pass() def _snake_case ( self : List[str] ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = torch_device 
SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) SCREAMING_SNAKE_CASE = output.audios SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) SCREAMING_SNAKE_CASE = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = torch_device SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) SCREAMING_SNAKE_CASE = output.audios SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) SCREAMING_SNAKE_CASE = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
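# A minimal generation sketch mirroring the slow tests above; the checkpoint and
# sampling settings are taken from those tests, and a CUDA device is assumed:
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#     output = pipe(num_inference_steps=100, audio_length_in_s=4.096)
#     audio = output.audios[0]  # numpy array of shape (channels, samples)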
16
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A : str = logging.get_logger(__name__) __A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __A : Tuple = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __A : str = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __A : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } __A : List[str] = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } __A : Any = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } __A : str = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __A : Any = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __A : Dict = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A : Optional[int] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __A : List[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Any , ): if titles is None and texts is None: return super().__call__( __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE = titles if texts is None else texts return super().__call__( __lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles] SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts." 
) SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"] SCREAMING_SNAKE_CASE = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE = attention_mask return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ): SCREAMING_SNAKE_CASE = reader_input["input_ids"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3] SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowerCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ): SCREAMING_SNAKE_CASE = [] for start_index, start_score in enumerate(__lowerCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" ) SCREAMING_SNAKE_CASE = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}" ) if any( start_index <= prev_start_index <= 
prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowerCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = ["input_ids", "attention_mask"]
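# A minimal usage sketch; the public method name (`decode_best_spans`) follows
# the upstream `transformers` DPR reader API that the obfuscated methods above
# correspond to, and the checkpoint and inputs are illustrative:
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a 1993 song recorded by Haddaway."],
#         return_tensors="pt",
#     )
#     outputs = model(**inputs)
#     best_spans = tokenizer.decode_best_spans(inputs, outputs)  # top answer spans per passage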
16
1
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DebertaTokenizer lowerCamelCase__ = True lowerCamelCase__ = DebertaTokenizerFast def _snake_case ( self : int ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE = {"unk_token": "[UNK]"} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def _snake_case ( self : int , **__lowerCamelCase : Optional[Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = "lower newer" return input_text, output_text def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = tokenizer("Hello" , "World" ) SCREAMING_SNAKE_CASE = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , __lowerCamelCase ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = 
[self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) for seq in encoding["input_ids"]] # fmt: off SCREAMING_SNAKE_CASE = { "input_ids": [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on SCREAMING_SNAKE_CASE = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , __lowerCamelCase ) for expected, decoded in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(__lowerCamelCase , __lowerCamelCase )
16
from typing import Any

import numpy as np


def __a ( A__ : np.ndarray ):
    return np.array_equal(A__ , matrix.conjugate().T )


def __a ( A__ : np.ndarray , A__ : np.ndarray ):
    SCREAMING_SNAKE_CASE = v.conjugate().T
    SCREAMING_SNAKE_CASE = v_star.dot(A__ )
    assert isinstance(A__ , np.ndarray )
    return (v_star_dot.dot(A__ )) / (v_star.dot(A__ ))


def __a ( ):
    SCREAMING_SNAKE_CASE = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    SCREAMING_SNAKE_CASE = np.array([[1], [2], [3]] )
    assert is_hermitian(A__ ), F"{a} is not hermitian."
    print(rayleigh_quotient(A__ , A__ ) )

    SCREAMING_SNAKE_CASE = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(A__ ), F"{a} is not hermitian."
    assert rayleigh_quotient(A__ , A__ ) == float(3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
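# A small numeric check of the Rayleigh quotient R(A, v) = (v* A v) / (v* v);
# the matrix and vector below are illustrative. For an eigenvector v of a
# Hermitian matrix A, the quotient equals the corresponding eigenvalue.
import numpy as np

_A = np.array([[2.0, 0.0], [0.0, 5.0]])  # symmetric, hence Hermitian; eigenvalues 2 and 5
_v = np.array([[1.0], [0.0]])  # eigenvector for eigenvalue 2
_q = (_v.conjugate().T @ _A @ _v) / (_v.conjugate().T @ _v)
assert np.isclose(_q.item(), 2.0)  # the quotient recovers the eigenvalue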
16
1
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : str=[10, 20, 30, 40] , __lowerCamelCase : Any=[2, 2, 3, 2] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Tuple=["stage2", "stage3", "stage4"] , __lowerCamelCase : Union[str, Any]=[2, 3, 4] , __lowerCamelCase : int=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_stages SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = out_features SCREAMING_SNAKE_CASE = out_indices SCREAMING_SNAKE_CASE = scope def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[Any] ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = ConvNextModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = ConvNextForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = ConvNextBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = ConvNextBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) lowerCamelCase__ = ( {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = ConvNextModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Dict ): return @unittest.skip(reason="ConvNext does not use inputs_embeds" ) def _snake_case ( self : Optional[int] ): pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def _snake_case ( self : int ): pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def _snake_case ( self : List[Any] ): pass def _snake_case ( self : str ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = 
[*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def _snake_case ( self : Tuple ): def check_hidden_states_output(__lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _snake_case ( self : Dict ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = ConvNextModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Any ): return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase , __snake_case ): '''simple docstring''' lowerCamelCase__ = (ConvNextBackbone,) if is_torch_available() else () lowerCamelCase__ = ConvNextConfig lowerCamelCase__ = False def _snake_case ( self : Optional[Any] ): 
SCREAMING_SNAKE_CASE = ConvNextModelTester(self )
16
from __future__ import annotations

__A : str = list[tuple[int, int]]
__A : Optional[int] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
__A : List[str] = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class _SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , __lowerCamelCase : Node | None , ):
        SCREAMING_SNAKE_CASE = pos_x
        SCREAMING_SNAKE_CASE = pos_y
        SCREAMING_SNAKE_CASE = (pos_y, pos_x)
        SCREAMING_SNAKE_CASE = goal_x
        SCREAMING_SNAKE_CASE = goal_y
        SCREAMING_SNAKE_CASE = g_cost
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = self.calculate_heuristic()

    def _snake_case ( self : Union[str, Any] ):
        SCREAMING_SNAKE_CASE = abs(self.pos_x - self.goal_x )
        SCREAMING_SNAKE_CASE = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
        return self.f_cost < other.f_cost


class _SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    def __init__( self : Optional[int] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : tuple[int, int] ):
        SCREAMING_SNAKE_CASE = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = [self.start]
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = False

    def _snake_case ( self : Optional[Any] ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            SCREAMING_SNAKE_CASE = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                SCREAMING_SNAKE_CASE = True
                return self.retrace_path(__lowerCamelCase )
            self.closed_nodes.append(__lowerCamelCase )
            SCREAMING_SNAKE_CASE = self.get_successors(__lowerCamelCase )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(__lowerCamelCase )
                else:
                    # retrieve the best current path
                    SCREAMING_SNAKE_CASE = self.open_nodes.pop(self.open_nodes.index(__lowerCamelCase ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(__lowerCamelCase )
                    else:
                        self.open_nodes.append(__lowerCamelCase )
        if not self.reached:
            return [self.start.pos]
        return None

    def _snake_case ( self : List[Any] , __lowerCamelCase : Node ):
        SCREAMING_SNAKE_CASE = []
        for action in delta:
            SCREAMING_SNAKE_CASE = parent.pos_x + action[1]
            SCREAMING_SNAKE_CASE = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    __lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCamelCase , ) )
        return successors

    def _snake_case ( self : str , __lowerCamelCase : Node | None ):
        SCREAMING_SNAKE_CASE = node
        SCREAMING_SNAKE_CASE = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            SCREAMING_SNAKE_CASE = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    __A : Optional[Any] = (0, 0)
    __A : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    __A : List[str] = GreedyBestFirst(init, goal)
    __A : Tuple = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            __A : Optional[Any] = 2
        for elem in grid:
            print(elem)
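# Note: `calculate_heuristic` above is the Manhattan distance
# |pos_x - goal_x| + |pos_y - goal_y|, and the search always expands the open
# node with the smallest f_cost, which here is the heuristic alone. Unlike A*,
# which orders nodes by g_cost + heuristic, greedy best-first search is not
# guaranteed to return a shortest path, only a path found quickly.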
16
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

__A : List[Any] = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
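# A minimal sketch of the deferred-import behaviour `_LazyModule` provides; the
# module path below is an assumption for illustration:
#
#     from transformers.models import git  # no torch-backed submodule imported yet
#     git.GitConfig                        # resolves from the configuration submodule
#     git.GitModel                         # first access triggers the guarded torch import above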
16
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __A : int = logging.get_logger(__name__) __A : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __A : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Tuple = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __A : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __A : Any = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) __A : Optional[int] = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) __A : Union[str, Any] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __A : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __A : str = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __A : Dict = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __A : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __A : Any = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __A : Optional[int] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __A : List[str] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) __A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __A : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_MAPPING __A : Optional[int] = auto_class_update(FlaxAutoModel) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING __A : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __A : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING __A : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __A : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : List[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __A : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence 
prediction' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __A : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __A : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
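# A minimal usage sketch for the auto classes defined above; the checkpoint
# name is illustrative:
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
#     classifier = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")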
16
1
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

__A : Any = '3'

print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
    print('Torch version:', None)

try:
    import transformers

    print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
16
def __a ( A__ : float , A__ : float ):
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    return 0.5 * mass * abs(A__ ) * abs(A__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
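# A worked example, assuming the two positional arguments are mass (kg) and
# velocity (m/s): KE = 0.5 * m * |v| ** 2, so a 5 kg body moving at -3 m/s has
# 0.5 * 5 * 3 * 3 == 22.5 J; taking absolute values makes the sign of the
# velocity irrelevant, since |v| * |v| == v ** 2 for real v.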
16
1
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A : str = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : List[str]=-1 ): # in NER datasets, the last column is usually reserved for NER label SCREAMING_SNAKE_CASE = label_idx def _snake_case ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Union[Split, str] ): if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = mode.value SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , f"{mode}.txt" ) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = [] with open(__lowerCamelCase , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__lowerCamelCase , labels=__lowerCamelCase ) ) guid_index += 1 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] else: SCREAMING_SNAKE_CASE = line.split(" " ) words.append(splits[0] ) if len(__lowerCamelCase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__lowerCamelCase , labels=__lowerCamelCase ) ) return examples def _snake_case ( self : Dict , __lowerCamelCase : TextIO , __lowerCamelCase : TextIO , __lowerCamelCase : List ): SCREAMING_SNAKE_CASE = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(__lowerCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: SCREAMING_SNAKE_CASE = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(__lowerCamelCase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." 
, line.split()[0] ) def _snake_case ( self : Dict , __lowerCamelCase : str ): if path: with open(__lowerCamelCase , "r" ) as f: SCREAMING_SNAKE_CASE = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Optional[Any] ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _snake_case ( self : int , __lowerCamelCase : str ): if path: with open(__lowerCamelCase , "r" ) as f: SCREAMING_SNAKE_CASE = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[Split, str] ): if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = mode.value SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , f"{mode}.txt" ) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = [] with open(__lowerCamelCase , encoding="utf-8" ) as f: for sentence in parse_incr(__lowerCamelCase ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__lowerCamelCase , labels=__lowerCamelCase ) ) guid_index += 1 return examples def _snake_case ( self : Union[str, Any] , __lowerCamelCase : TextIO , __lowerCamelCase : TextIO , __lowerCamelCase : List ): SCREAMING_SNAKE_CASE = 0 for sentence in parse_incr(__lowerCamelCase ): SCREAMING_SNAKE_CASE = preds_list[example_id] SCREAMING_SNAKE_CASE = "" for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(__lowerCamelCase ) example_id += 1 def _snake_case ( self : List[Any] , __lowerCamelCase : str ): if path: with open(__lowerCamelCase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
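# A minimal sketch of the whitespace-separated, CoNLL-style input that the NER
# reader above expects (token in the first column, NER label in the last; the
# example rows are illustrative):
#
#     -DOCSTART- -X- -X- O
#
#     EU NNP B-NP B-ORG
#     rejects VBZ B-VP O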
16
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A : Dict = logging.get_logger(__name__) @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : List[Any] , **__lowerCamelCase : Any ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE = deprecated_arg[3:] SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase ) logger.warning( f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name ) SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx ) SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode ) SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Name of TPU"} , ) lowerCamelCase__ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _snake_case ( self : Optional[int] ): requires_backends(self , ["tf"] ) SCREAMING_SNAKE_CASE = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE = None return tpu @cached_property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" ) return strategy @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def _snake_case ( self : Optional[Any] ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def _snake_case ( self : List[str] ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def _snake_case ( self : Any ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _snake_case ( self : Dict ): return self.n_gpu > 0
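# A minimal usage sketch; the constructor keywords follow the upstream
# `TensorFlowBenchmarkArguments` signature and are assumptions here:
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )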
16
1
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def __a ( A__ : List[str]=32 , A__ : Dict=10 , A__ : int=100 , A__ : Optional[int]=1026 , A__ : Union[str, Any]=True , A__ : str="data/tokenized_stories_train_wikitext103.jbl" , A__ : Any="igf_context_pairs.jbl" , ): set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_datasets( A__ , A__ , number=A__ , min_len=1026 , trim=A__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model SCREAMING_SNAKE_CASE = load_gpta("gpt2" ).to(A__ ) print("computing perplexity on objective set" ) SCREAMING_SNAKE_CASE = compute_perplexity(A__ , A__ , A__ ).item() print("perplexity on objective set:" , A__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def __a ( A__ : str , A__ : int=15 , A__ : Dict=128 , A__ : Dict=100 , A__ : List[str]="igf_model.pt" , ): set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE = SecondaryLearner(A__ ) # Train secondary learner SCREAMING_SNAKE_CASE = train_secondary_learner( A__ , A__ , max_epochs=A__ , batch_size=A__ , eval_freq=100 , igf_model_path=A__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def __a ( A__ : Tuple , A__ : Any , A__ : Optional[int] , A__ : Union[str, Any]=32 , A__ : Any=1000 , A__ : List[Any]=16 , A__ : Tuple=1.0 , A__ : Union[str, Any]=recopy_gpta , A__ : Optional[int]=None , A__ : Optional[Any]=10 , A__ : Tuple="gpt2_finetuned.pt" , ): SCREAMING_SNAKE_CASE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) SCREAMING_SNAKE_CASE = RandomSampler(A__ ) SCREAMING_SNAKE_CASE = DataLoader(A__ , sampler=A__ ) SCREAMING_SNAKE_CASE = max_steps // (len(A__ )) + 1 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = torch.zeros((1, context_len) , dtype=torch.long , device=A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = recopy_model(A__ , A__ , A__ ) model.train() if secondary_learner is not None: secondary_learner.to(A__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE = compute_perplexity(A__ , A__ , A__ ) test_perps.append(A__ ) print("Test perplexity, step" , A__ , ":" , A__ ) for epoch in range(int(A__ ) ): for step, example in enumerate(A__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() SCREAMING_SNAKE_CASE = model(A__ , labels=A__ ) SCREAMING_SNAKE_CASE = True if secondary_learner is not None: SCREAMING_SNAKE_CASE = 
secondary_learner.forward( torch.tensor(A__ , dtype=torch.long , device=A__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(A__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE = compute_perplexity(A__ , A__ , A__ ) test_perps.append(A__ ) print("Test perplexity, step" , A__ , ":" , A__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , A__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def __a ( ): SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=A__ , type=A__ , required=A__ , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=A__ , type=A__ , required=A__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=A__ , default=A__ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=A__ , default=A__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=A__ , type=A__ , required=A__ , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=A__ , type=A__ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=A__ , default=A__ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=A__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--size_objective_set" , default=100 , type=A__ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=A__ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1000 , type=A__ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=A__ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=A__ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=A__ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=A__ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1026 , type=A__ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=A__ , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=A__ , type=A__ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=A__ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=A__ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=A__ , type=A__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=A__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE = joblib.load("data/IGF_values.jbl" ) # Train secondary learner SCREAMING_SNAKE_CASE = training_secondary_learner( A__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=A__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( A__ , A__ , A__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=A__ , secondary_learner=A__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
16
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's method (modified Euler): a predictor-corrector scheme of order 2."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor: one plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
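A quick usage sketch for the integrator above: solve dy/dx = y with y(0) = 1 on [0, 1], whose exact value at x = 1 is e ≈ 2.71828.

def f(x: float, y: float) -> float:
    return y  # dy/dx = y

approx = euler_modified(f, 1.0, 0.0, 0.01, 1.0)
print(approx[-1])  # close to e for this step size, since Heun's error is O(h^2)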
16
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = LDMTextToImagePipeline lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ = False def _snake_case ( self : List[Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE = CLIPTextModel(__lowerCamelCase ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) SCREAMING_SNAKE_CASE = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=0 ): if str(__lowerCamelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = LDMTextToImagePipeline(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) SCREAMING_SNAKE_CASE = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu 
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=torch.floataa , __lowerCamelCase : List[Any]=0 ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) ) SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] ) SCREAMING_SNAKE_CASE = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str]=torch.floataa , __lowerCamelCase : List[str]=0 ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) ) SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images[0] SCREAMING_SNAKE_CASE = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) SCREAMING_SNAKE_CASE = np.abs(expected_image - image ).max() assert max_diff < 1e-3
16
def sum_of_proper_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    # every proper divisor of n is at most n // 2
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
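Usage sketch: because the function sums proper divisors, a perfect number maps to itself.

assert sum_of_proper_divisors(6) == 1 + 2 + 3 == 6       # 6 is perfect
assert sum_of_proper_divisors(28) == 28                   # so is 28
assert sum_of_proper_divisors(12) == 1 + 2 + 3 + 4 + 6    # 16 for a non-perfect number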
16
1
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __A : Tuple = logging.get_logger(__name__) __A : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED __A : List[str] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } __A : int = { 'allenai/led-base-16384': 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : str ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Union[str, Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : int ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : int , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : List[Any] , __lowerCamelCase : int ): return self.decoder.get(__lowerCamelCase ) def 
_snake_case ( self : List[Any] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple=False , **__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : List[str] , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): SCREAMING_SNAKE_CASE = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model 
defaults if return_attention_mask is None: SCREAMING_SNAKE_CASE = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. SCREAMING_SNAKE_CASE = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` SCREAMING_SNAKE_CASE = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
16
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __A : List[Any] = {'UserAgent': UserAgent().random} def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = script.contents[0] SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = f"https://www.instagram.com/{username}/" SCREAMING_SNAKE_CASE = self.get_json() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__lowerCamelCase ).text SCREAMING_SNAKE_CASE = BeautifulSoup(__lowerCamelCase , "html.parser" ).find_all("script" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ): return f"{self.__class__.__name__}('{self.username}')" def __str__( self : str ): return f"{self.fullname} ({self.username}) is {self.biography}" @property def _snake_case ( self : Optional[int] ): return self.user_data["username"] @property def _snake_case ( self : List[Any] ): return self.user_data["full_name"] @property def _snake_case ( self : List[str] ): return self.user_data["biography"] @property def _snake_case ( self : Tuple ): return self.user_data["business_email"] @property def _snake_case ( self : Optional[Any] ): return self.user_data["external_url"] @property def _snake_case ( self : int ): return self.user_data["edge_followed_by"]["count"] @property def _snake_case ( self : List[str] ): return self.user_data["edge_follow"]["count"] @property def _snake_case ( self : List[Any] ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _snake_case ( self : Any ): return self.user_data["profile_pic_url_hd"] @property def _snake_case ( self : Optional[int] ): return self.user_data["is_verified"] @property def _snake_case ( self : Dict ): return self.user_data["is_private"] def __a ( A__ : str = "github" ): import os if os.environ.get("CI" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE = InstagramUser(A__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , A__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("https://instagram." ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __A : Dict = InstagramUser('github') print(instagram_user) print(f'{instagram_user.number_of_posts = }') print(f'{instagram_user.number_of_followers = }') print(f'{instagram_user.number_of_followings = }') print(f'{instagram_user.email = }') print(f'{instagram_user.website = }') print(f'{instagram_user.profile_picture_url = }') print(f'{instagram_user.is_verified = }') print(f'{instagram_user.is_private = }')
16
1
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
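The file above is an instance of a common deprecation shim; stripped of the transformers plumbing, the pattern (with illustrative class names) is:

import warnings

class NewImageProcessor:
    def __init__(self, size: int = 384) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    # Deprecated alias: warn once at construction, then behave exactly like the new class.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=512)  # emits a FutureWarning, works as before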
16
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Tuple , __lowerCamelCase : Dict ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Any , __lowerCamelCase : 
Optional[int] ): return self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : int , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
16
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : Any=400 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=0.9 , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ): SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 30} SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 30, "width": 30} SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = min_resolution SCREAMING_SNAKE_CASE = max_resolution SCREAMING_SNAKE_CASE = do_resize_and_center_crop SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = crop_pct SCREAMING_SNAKE_CASE = crop_size SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean SCREAMING_SNAKE_CASE = image_std def _snake_case ( self : Dict ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PoolFormerImageProcessor if is_vision_available() else None def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = PoolFormerImageProcessingTester(self ) @property def _snake_case ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize_and_center_crop" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "crop_pct" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 30} ) self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} ) SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def _snake_case ( self : List[str] ): pass def _snake_case ( self : List[Any] ): # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for 
image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : Optional[int] ): # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : str ): # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
16
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a ( A__ : str , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : List[Any] , A__ : Optional[int]=None ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json() SCREAMING_SNAKE_CASE = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 ) for i in range(A__ ): SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = result.headers["Location"] SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ ) SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" ) with open(A__ , "wb" ) as fp: fp.write(response.content ) def __a ( A__ : List[Any] , A__ : List[Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = None with zipfile.ZipFile(A__ ) as z: for filename in z.namelist(): if not os.path.isdir(A__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(A__ ) as f: for line in f: SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs SCREAMING_SNAKE_CASE = line[: line.index(": " )] SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed SCREAMING_SNAKE_CASE = line[len("FAILED " ) :] failed_tests.append(A__ ) elif filename == "job_name.txt": SCREAMING_SNAKE_CASE = line if len(A__ ) != len(A__ ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. 
Got {len(A__ )} for `errors` " F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" " problem." ) SCREAMING_SNAKE_CASE = None if job_name and job_links: SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ ) # A list with elements of the form (line of error, error, failed test) SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )] return result def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) ) return errors def __a ( A__ : List[str] , A__ : Tuple=None ): SCREAMING_SNAKE_CASE = Counter() counter.update([x[1] for x in logs] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {} for error, count in counts: if error_filter is None or error not in error_filter: SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : str ): SCREAMING_SNAKE_CASE = test.split("::" )[0] if test.startswith("tests/models/" ): SCREAMING_SNAKE_CASE = test.split("/" )[2] else: SCREAMING_SNAKE_CASE = None return test def __a ( A__ : List[str] , A__ : Dict=None ): SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs] SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None] SCREAMING_SNAKE_CASE = {x[2] for x in logs} SCREAMING_SNAKE_CASE = {} for test in tests: SCREAMING_SNAKE_CASE = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) SCREAMING_SNAKE_CASE = counter.most_common() SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} SCREAMING_SNAKE_CASE = sum(error_counts.values() ) if n_errors > 0: SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts} SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) ) return r def __a ( A__ : Dict ): SCREAMING_SNAKE_CASE = "| no. | error | status |" SCREAMING_SNAKE_CASE = "|-:|:-|:-|" SCREAMING_SNAKE_CASE = [header, sep] for error in reduced_by_error: SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"] SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |" lines.append(A__ ) return "\n".join(A__ ) def __a ( A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = "| model | no. 
of errors | major error | count |" SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|" SCREAMING_SNAKE_CASE = [header, sep] for model in reduced_by_model: SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0] SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(A__ ) return "\n".join(A__ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __A : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A : int = get_job_links(args.workflow_run_id, token=args.token) __A : Dict = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __A : Union[str, Any] = k.find(' / ') __A : Optional[int] = k[index + len(' / ') :] __A : Optional[int] = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A : Dict = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A : Optional[Any] = counter.most_common(3_0) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A : str = reduce_by_error(errors) __A : int = reduce_by_model(errors) __A : Any = make_github_table(reduced_by_error) __A : List[str] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
16
1
import baseaa


def __a ( A__ : str ):
    return baseaa.aaaencode(string.encode("utf-8" ) )


def __a ( A__ : bytes ):
    return baseaa.aaadecode(A__ ).decode("utf-8" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
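# Runnable round-trip sketch. It is an assumption on my part that the
# obfuscated `baseaa.aaaencode` / `baseaa.aaadecode` calls above stand for
# the stdlib Ascii85 helpers `base64.a85encode` / `base64.a85decode`.
import base64

encoded = base64.a85encode("some text".encode("utf-8"))
decoded = base64.a85decode(encoded).decode("utf-8")
assert decoded == "some text"  # encode/decode are exact inverses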
16
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A : Optional[int] = logging.get_logger(__name__)

__A : Optional[Any] = {
    'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class _SCREAMING_SNAKE_CASE ( __snake_case ):
    '''simple docstring'''

    lowerCamelCase__ = "gpt_neox"

    def __init__( self : Optional[int] , __lowerCamelCase : List[str]=50432 , __lowerCamelCase : int=6144 , __lowerCamelCase : Optional[Any]=44 , __lowerCamelCase : Tuple=64 , __lowerCamelCase : Optional[int]=24576 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Any=0.25 , __lowerCamelCase : List[Any]=10000 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[Any]=2048 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Tuple=1e-5 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : str , ):
        super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = rotary_pct
        SCREAMING_SNAKE_CASE = rotary_emb_base
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = classifier_dropout
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = use_cache
        SCREAMING_SNAKE_CASE = tie_word_embeddings
        SCREAMING_SNAKE_CASE = use_parallel_residual
        SCREAMING_SNAKE_CASE = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )

    def _snake_case ( self : Union[str, Any] ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}" )
        SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
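# Quick sketch of the `rope_scaling` validation path above; assumes a
# `transformers` install that exposes `GPTNeoXConfig` with rope-scaling
# support under this name.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be a float > 1.0
except ValueError as err:
    print(err)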
16
1
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = LEDConfig lowerCamelCase__ = {} lowerCamelCase__ = "gelu" def __init__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : str=7 , __lowerCamelCase : Any=True , __lowerCamelCase : str=False , __lowerCamelCase : Optional[Any]=99 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Any=20 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Dict=4 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after SCREAMING_SNAKE_CASE = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests SCREAMING_SNAKE_CASE = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tf.concat( [tf.zeros_like(__lowerCamelCase )[:, :-1], tf.ones_like(__lowerCamelCase )[:, -1:]] , axis=-1 , ) SCREAMING_SNAKE_CASE = global_attention_mask return config, inputs_dict def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = TFLEDModel(config=__lowerCamelCase ).get_decoder() SCREAMING_SNAKE_CASE = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE = input_ids[:1, :] SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :] SCREAMING_SNAKE_CASE = 1 # first forward pass SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 ) SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 ) def __a ( A__ : int , A__ : Dict , A__ : List[str] , A__ : int=None , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : Dict=None , ): if attention_mask is None: SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase__ = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = TFLEDModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase ) def _snake_case ( self : Tuple ): self.config_tester.run_common_tests() def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] ) SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = self.model_tester.seq_length SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = outputs.decoder_attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions] SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) 
check_encoder_attentions_output(__lowerCamelCase ) if self.is_encoder_decoder: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_decoder_attentions_output(__lowerCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) ) self.assertEqual(model.config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def _snake_case ( self : List[Any] ): pass def _snake_case ( self : List[str] ): # TODO: Head-masking not yet implement pass def __a ( A__ : Any ): return tf.constant(A__ , dtype=tf.intaa ) __A : Optional[int] = 1e-4 @slow @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )[0] SCREAMING_SNAKE_CASE = (1, 1024, 768) self.assertEqual(output.shape , __lowerCamelCase ) # change to expected output here SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )[0] SCREAMING_SNAKE_CASE = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , __lowerCamelCase ) # change to expected output here SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 , rtol=1e-3 )
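# Sketch of the sequence-padding arithmetic used in the tester above: pad
# `seq_length` up to the next multiple of `attention_window` (the values
# here are illustrative, not the tester's defaults).
seq_length, attention_window = 7, 4
encoder_seq_length = (
    seq_length + (attention_window - seq_length % attention_window) % attention_window
)
assert encoder_seq_length == 8  # 7 rounded up to the next multiple of 4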
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__A : List[Any] = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
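# Hedged sketch of the lazy-import idea behind `_LazyModule`: attribute
# access triggers the real import. This is a simplified stand-in of my own,
# not the actual transformers implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only reached when normal attribute lookup fails
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)


lazy = TinyLazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": True}))  # `json` is imported only on first access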
16
1
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __A : str = logging.get_logger(__name__) __A : str = 'https://openaipublic.azureedge.net/jukebox/models/' __A : Optional[int] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def __a ( A__ : Union[str, Any] ): if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: SCREAMING_SNAKE_CASE = key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: SCREAMING_SNAKE_CASE = key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: SCREAMING_SNAKE_CASE = key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." in key: SCREAMING_SNAKE_CASE = key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def __a ( A__ : List[str] , A__ : Optional[Any] , A__ : List[Any] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = {} import re SCREAMING_SNAKE_CASE = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename 
vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.sub(A__ , A__ ) elif re_encoder_block_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_encoder_block_resnet.sub(A__ , A__ ) elif re_encoder_block_proj_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.sub(A__ , A__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.sub(A__ , A__ ) elif re_decoder_block_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_decoder_block_resnet.sub(A__ , A__ ) elif re_decoder_block_proj_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.sub(A__ , A__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.sub(A__ , A__ ) elif re_prior_cond_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.upsample_block.{block_index}." 
SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_prior_cond_resnet.sub(A__ , A__ ) elif re_prior_cond_proj_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}" SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.sub(A__ , A__ ) # keep original key else: SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = replace_key(A__ ) if F"{key_prefix}.{key}" not in model_state_dict or key is None: print(F"failed converting {original_key} to {key}, does not match" ) # handle missmatched shape elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape: SCREAMING_SNAKE_CASE = model_state_dict[F"{key_prefix}.{key}"] print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" ) SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = value return new_dict @torch.no_grad() def __a ( A__ : Tuple=None , A__ : List[Any]=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ): SCREAMING_SNAKE_CASE = requests.get(F"{PREFIX}{file}" , allow_redirects=A__ ) os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=A__ ) open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content ) SCREAMING_SNAKE_CASE = MODEL_MAPPING[model_name.split("/" )[-1]] SCREAMING_SNAKE_CASE = JukeboxConfig.from_pretrained(A__ ) SCREAMING_SNAKE_CASE = JukeboxModel(A__ ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} for i, dict_name in enumerate(A__ ): SCREAMING_SNAKE_CASE = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"] SCREAMING_SNAKE_CASE = {} for k in old_dic.keys(): if k.endswith(".b" ): SCREAMING_SNAKE_CASE = old_dic[k] elif k.endswith(".w" ): SCREAMING_SNAKE_CASE = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: SCREAMING_SNAKE_CASE = old_dic[k] else: SCREAMING_SNAKE_CASE = old_dic[k] SCREAMING_SNAKE_CASE = "vqvae" if i == 0 else F"priors.{3 - i}" SCREAMING_SNAKE_CASE = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ ) weight_dict.append(A__ ) SCREAMING_SNAKE_CASE = weight_dict.pop(0 ) model.vqvae.load_state_dict(A__ ) for i in range(len(A__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A__ ).mkdir(exist_ok=A__ ) with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile: json.dump(A__ , A__ ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A__ ) return weight_dict if __name__ == "__main__": __A : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) __A : List[Any] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
16
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __A : List[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', 
f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 
'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def __a ( A__ : Dict , A__ : Dict , A__ : Any ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value return new_state_dict def __a ( A__ : Optional[Any] , A__ : Tuple=False ): SCREAMING_SNAKE_CASE = "" if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] def __a ( ): SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __a ( A__ : List[str] , A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: SCREAMING_SNAKE_CASE = "resnet101" if "dc5" in model_name: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE = 250 else: SCREAMING_SNAKE_CASE = 91 SCREAMING_SNAKE_CASE = "huggingface/label-files" SCREAMING_SNAKE_CASE = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} # load image processor SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection" SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ ) # prepare 
image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE = encoding["pixel_values"] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval() SCREAMING_SNAKE_CASE = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: SCREAMING_SNAKE_CASE = "conditional_detr." + src rename_key(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ , is_panoptic=A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion SCREAMING_SNAKE_CASE = conditional_detr(A__ ) SCREAMING_SNAKE_CASE = model(A__ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : str = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __A : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
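# Toy illustration (assumes torch is installed) of the fused-projection
# split performed in read_in_q_k_v above: one (3*d, d) in_proj matrix
# becomes separate query/key/value slices.
import torch

hidden = 256  # matches the 256-wide slices used above
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)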
16
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__A : int = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Union[str, Any] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # nothing extra to import when tokenizers is available; raising here
        # (as this block previously did) would abort type checking, so fall through
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
from __future__ import annotations


def __a ( A__ : list[int | str] ):
    create_state_space_tree(A__ , [] , 0 , [0 for i in range(len(A__ ) )] )


def __a ( A__ : list[int | str] , A__ : list[int | str] , A__ : int , A__ : list[int] , ):
    if index == len(A__ ):
        print(A__ )
        return
    for i in range(len(A__ ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            SCREAMING_SNAKE_CASE = True
            create_state_space_tree(A__ , A__ , index + 1 , A__ )
            current_sequence.pop()
            SCREAMING_SNAKE_CASE = False


__A : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
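# Sanity sketch: a backtracking permutation generator over n distinct items
# visits exactly n! leaf states, matching itertools.permutations.
from itertools import permutations
from math import factorial

items = [3, 1, 2, 4]
assert len(list(permutations(items))) == factorial(len(items))  # 24 orderings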
16
1
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : int=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : str=4 , __lowerCamelCase : str=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Optional[Any]=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : str ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , 
initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCamelCase , ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = FalconModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , ): SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = FalconModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ): SCREAMING_SNAKE_CASE = FalconForCausalLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ): SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = FalconForCausalLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() # first forward pass SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , 
output_hidden_states=__lowerCamelCase , )["hidden_states"][0] SCREAMING_SNAKE_CASE = model( __lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = config_and_inputs SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = (FalconForCausalLM,) if is_torch_available() else () lowerCamelCase__ = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = FalconModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : int ): self.config_tester.run_common_tests() def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE = alibi self.model_tester.create_and_check_model(__lowerCamelCase , *__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = input_dict["input_ids"] SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE = FalconForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 
"single_label_classification" SCREAMING_SNAKE_CASE = input_dict["input_ids"] SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE = FalconForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = input_dict["input_ids"] SCREAMING_SNAKE_CASE = FalconForCausalLM(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , use_cache=__lowerCamelCase ) SCREAMING_SNAKE_CASE = input_ids.shape[0] SCREAMING_SNAKE_CASE = model._convert_to_rw_cache(result.past_key_values ) SCREAMING_SNAKE_CASE = model._convert_cache_to_standard_format(__lowerCamelCase , __lowerCamelCase ) for layer in range(len(__lowerCamelCase ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = "multi_label_classification" SCREAMING_SNAKE_CASE = input_dict["input_ids"] SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE = FalconForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _snake_case ( self : List[str] ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__lowerCamelCase , "use_cache" ): return SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ).to(__lowerCamelCase ) if "use_cache" not in inputs: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # If "past_key_values" is not returned, pass the test (e.g. 
        # (e.g. RWKV uses a different cache name and format)
        if "past_key_values" not in outputs:
            return

        num_hidden_layers = (
            getattr(config, "decoder_layers", None)
            or getattr(config, "num_decoder_layers", None)
            or config.num_hidden_layers
        )
        num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
        embed_dim = getattr(config, "d_model", config.hidden_size)
        per_head_embed_dim = embed_dim // num_attention_heads

        past_kv = outputs["past_key_values"]
        self.assertEqual(len(past_kv), num_hidden_layers)

        batch_size, seq_length = inputs["input_ids"].shape
        for i in range(num_hidden_layers):
            if config.new_decoder_architecture:
                num_attention_heads = config.num_attention_heads
            elif config.multi_query:
                num_attention_heads = 1
            self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
            self.assertEqual(
                past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
            )
            self.assertEqual(
                past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
            )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
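# --- Hedged usage sketch (added, not part of the original test file) ---
# Distills the KV-cache contract the tests above exercise; the repo name comes
# from those tests, but treat this snippet as illustrative rather than canonical.
import torch
from transformers import AutoTokenizer, FalconForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
inputs = tokenizer("My favorite food is", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, use_cache=True)
# One (key, value) pair per decoder layer, each shaped
# (batch, num_kv_heads, seq_len, head_dim):
assert len(outputs.past_key_values) == model.config.num_hidden_layers
assert len(outputs.past_key_values[0]) == 2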
16
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        # A multiple of 15 is already a multiple of 3, so one test suffices;
        # the original `elif a % 15 == 0: result -= a` branch was unreachable
        # dead code and has been dropped.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
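# --- Hedged sanity check (added; assumes the `solution` name from the rewrite above) ---
# Below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23;
# 233168 is the well-known answer for the default limit of 1000.
assert solution(10) == 23
assert solution() == 233168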
16
1
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate train/valid dataloaders for a noisy linear regression y = a * x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train `model` for `num_epochs`, returning the random numbers drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with `total_limit=2`, only the two most recent checkpoints survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
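# --- Hedged distillation (added, not part of the original test script) ---
# The minimal save/restore cycle the tests above revolve around, using the same
# accelerate API; the project directory below is illustrative.
import torch
from torch import nn

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

demo_model = nn.Linear(1, 1)
demo_optimizer = torch.optim.Adam(demo_model.parameters(), lr=1e-3)
demo_accelerator = Accelerator(
    project_dir="/tmp/ckpt-demo",
    project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
)
demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
demo_accelerator.save_state()  # writes /tmp/ckpt-demo/checkpoints/checkpoint_0
# ... train, then roll back to the saved weights and optimizer state:
demo_accelerator.load_state("/tmp/ckpt-demo/checkpoints/checkpoint_0")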
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
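# --- Hedged usage sketch (added) ---
# What the lazy structure above buys you: importing the package is cheap, and
# the heavy torch-backed symbols are only materialized on first attribute
# access. The class name below is taken from the import structure declared above.
from transformers import BigBirdPegasusConfig  # resolved lazily via _LazyModule

config = BigBirdPegasusConfig()
print(config.model_type)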
16
1
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
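# --- Hedged example (added; uses the `binary_or` name from the rewrite above) ---
# 25 = 0b011001 and 32 = 0b100000, so their OR is 0b111001 (= 57).
assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == 25 | 32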
16
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
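# --- Hedged usage sketch (added; mirrors the round-trip the tests above check) ---
# The transform must be reversed before serializing, otherwise `save_pretrained`
# raises; the model id is taken from the tests, the output path is illustrative.
from transformers import AutoModelForSeq2SeqLM

bt_model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
bt_model = bt_model.to_bettertransformer()       # swap in fused attention kernels (needs optimum)
bt_model = bt_model.reverse_bettertransformer()  # restore the canonical modeling code
bt_model.save_pretrained("/tmp/t5-roundtrip")    # now safe to serialize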
16
1
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
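# --- Hedged round-trip check (added; uses the function names from the rewrite above) ---
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode(encode("hello")) == "hello"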
16
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)

            # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
16
1
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with Heun's (modified Euler) method.

    `ya` is the initial value y(xa); integration runs from `xa` to `x_end`
    in steps of `step_size`.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Predictor: a plain forward-Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the step
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
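# --- Hedged usage sketch (added; uses the `euler_modified` name from the rewrite above) ---
# Solve y' = y with y(0) = 1 on [0, 1]; Heun's method has O(h^2) global error,
# so with h = 0.01 the endpoint should land well within 1e-3 of e.
approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(approx[-1] - np.e) < 1e-3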
16
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            # A `GenerationConfig` is not JSON-serializable as-is, so flatten it to a dict
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
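# --- Hedged usage sketch (added; field names follow the rewrite above) ---
# Enable generation-based evaluation with beam search; `output_dir` is inherited
# from `TrainingArguments`, and the path below is illustrative.
args = Seq2SeqTrainingArguments(
    output_dir="/tmp/seq2seq-demo",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)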
16
1