Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import re import string from nltk.stem import PorterStemmer from nltk.corpus import stopwords from nltk.tokenize import TweetTokenizer def process_tweet(tweet): """Process tweet function. Input: tweet: a string containing a tweet Output: tweets_clean: a list of words containing the processed tweet""" stemmer = PorterStemmer() stopwords_english = stopwords.words('english') # Remove stock market tickers like $GE tweet = re.sub(r'\$\w*', '', tweet) # Remove old style retweet text "RT" tweet = re.sub(r'^RT[\s]+', '', tweet) # Remove hyperlinks tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet) # Remove hashtags # Only removing the hash # sign from the word tweet = re.sub(r'#', '', tweet) # Tokenize tweets tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) tweet_tokens = tokenizer.tokenize(tweet) tweets_clean = [] for word in tweet_tokens: # 1 Remove stopwords # 2 Remove punctuation if (word not in stopwords_english and word not in string.punctuation): # 3 Stemming word stem_word = stemmer.stem(word) # 4 Add it to tweets_clean tweets_clean.append(stem_word) return tweets_clean
2b69f70cfec5f90a6e58408fcd054cda7ad0f20a
3,655,798
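A minimal usage sketch for the process_tweet snippet above; it assumes the NLTK stopwords corpus has already been downloaded (nltk.download('stopwords')), and the tweet text and expected tokens are illustrative only.

# Hypothetical call in the same module where process_tweet is defined:
tokens = process_tweet("RT @user: Loving the new release! #python")
print(tokens)  # roughly ['love', 'new', 'releas', 'python'] (lower-cased, stemmed, stopwords and punctuation removed)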
def oracle_query_id(sender_id, nonce, oracle_id): """ Compute the query id for a sender and an oracle :param sender_id: the account making the query :param nonce: the nonce of the query transaction :param oracle_id: the oracle id """ def _int32(val): return val.to_bytes(32, byteorder='big') return hash_encode("oq", decode(sender_id) + _int32(nonce) + decode(oracle_id))
aa97834efd3df10951e05b99035dbef8210ba33d
3,655,799
from typing import List from typing import Set def knapsack_with_budget(vals: List[float], weights: List[int], budget: int, cap: int) -> Set[int]: """ Solves the knapsack problem (with budget) of the items with the given values and weights, with the given budget and capacity, in a bottom-up way. :param vals: list[float] :param weights: list[int] :param budget: int :param cap: int :return: set{int} """ # Check whether the input arrays are None or empty if not vals: return set() # Check whether the input budget is non-negative if budget < 0: return set() # Check whether the input capacity is non-negative if cap < 0: return set() n = len(vals) # Initialization subproblems = [ [[0.0] * (cap + 1) for _ in range(budget + 1)] for _ in range(n) ] for b in range(budget + 1): for x in range(cap + 1): if b >= 1 and weights[0] <= x: subproblems[0][b][x] = vals[0] # Bottom-up calculation for item in range(1, n): for b in range(budget + 1): for x in range(cap + 1): if b <= 0 or weights[item] > x: subproblems[item][b][x] = subproblems[item - 1][b][x] else: result_without_curr = subproblems[item - 1][b][x] result_with_curr = \ subproblems[item - 1][b - 1][x - weights[item]] + \ vals[item] subproblems[item][b][x] = max(result_without_curr, result_with_curr) return _reconstruct(vals, weights, budget, cap, subproblems) # Overall running time complexity: O(n*k*W), where k is the budget and W is # the knapsack capacity
3d91f18f8be7b82f17ebcda9dbfa419eadeec0ea
3,655,800
import functools def _basemap_redirect(func): """ Decorator that calls the basemap version of the function of the same name. This must be applied as the innermost decorator. """ name = func.__name__ @functools.wraps(func) def wrapper(self, *args, **kwargs): if getattr(self, 'name', '') == 'basemap': return getattr(self.projection, name)(*args, ax=self, **kwargs) else: return func(self, *args, **kwargs) wrapper.__doc__ = None return wrapper
f3cee9113a6f8044255d3013e357742e231ea98e
3,655,801
def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings"): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) output = tf.nn.embedding_lookup(embedding_table, input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return output, embedding_table
2f66d05ab70f4fb38d990e66ec5829cb62fdc934
3,655,802
import re def find_version(infile): """ Given an open file (or some other iterator of lines) holding a configure.ac file, find the current version line. """ for line in infile: m = re.search(r'AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)', line) if m: return m.group(1) return None
35ac18757ee1156f046bbd9ffa68ed4898bc317a
3,655,803
import math def linear_warmup_decay(warmup_steps, total_steps, cosine=True, linear=False): """ Linear warmup for warmup_steps, optionally with cosine annealing or linear decay to 0 at total_steps """ # check if both decays are not True at the same time assert not (linear and cosine) def fn(step): if step < warmup_steps: return float(step) / float(max(1, warmup_steps)) if not (cosine or linear): # no decay return 1.0 progress = float(step - warmup_steps) / float( max(1, total_steps - warmup_steps) ) if cosine: # cosine decay return 0.5 * (1.0 + math.cos(math.pi * progress)) # linear decay return 1.0 - progress return fn
9326622a07be677cb82744a30850674ca3c5f789
3,655,804
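A small sanity-check sketch for the linear_warmup_decay snippet above; the warmup and total step counts are illustrative.

# Warmup for 10 steps, then cosine decay to 0 at step 100.
fn = linear_warmup_decay(warmup_steps=10, total_steps=100, cosine=True)
print(fn(0))    # 0.0  (start of warmup)
print(fn(5))    # 0.5  (halfway through warmup)
print(fn(10))   # 1.0  (warmup finished, cosine decay begins)
print(fn(100))  # ~0.0 (fully decayed at total_steps)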
def query_anumbers(bbox,bbox2,bounds2): """ Queries anumbers of the reports within region defined Args: `bbox`= bounds of the region defined Returns: `anumberscode`=list of anumbers """ try: collars_file='http://geo.loop-gis.org/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:collar_4326&bbox='+bbox2+'&srs=EPSG:4326' collars = gpd.read_file(collars_file, bbox=bbox) print("Connected to Loop Server") anumbers=gpd.GeoDataFrame(collars, columns=["anumber"]) anumbers = pd.DataFrame(anumbers.drop_duplicates(subset=["anumber"])) except HTTPError as err: if err.code == 404 or err.code == 500 or err.code == 503: query="""SELECT DISTINCT (collar.anumber) FROM public.collar WHERE(longitude BETWEEN %s AND %s) AND (latitude BETWEEN %s AND %s) ORDER BY collar.anumber ASC""" conn = psycopg2.connect(host="130.95.198.59", port = 5432, database="gswa_dh", user="postgres", password="loopie123pgpw") cur = conn.cursor() cur.execute(query, bounds2) anumbers=pd.DataFrame(cur, columns=["anumber"]) print("Connected to PostgreSQL Server") else: raise #collars_file='http://geo.loop-gis.org/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:collar_4326&bbox='+bbox2+'&srs=EPSG:4326' #collars = gpd.read_file(collars_file, bbox=bbox) #anumbers=gpd.GeoDataFrame(collars, columns=["anumber"]) #anumbers = pd.DataFrame(anumbers.drop_duplicates(subset=["anumber"])) #print(anumbers) anumbers['anumberlength']=anumbers['anumber'].astype(str).map(len) anumberscode=[] for index, row in anumbers.iterrows(): if (int(row[1])==5): text=str("a0"+ str(row[0])) text2=str("a"+ str(row[0])) elif (int(row[1])==4): text=str("a00"+ str(row[0])) text2=str("a"+ str(row[0])) elif (int(row[1])==3): text=str("a000"+ str(row[0])) text2=str("a"+ str(row[0])) elif (int(row[1])==2): text=str("a0000"+ str(row[0])) text2=str("a"+ str(row[0])) elif (int(row[1])==1): text=str("a00000"+ str(row[0])) text2=str("a"+ str(row[0])) else: text= str("a"+ str(row[0])) anumberscode.append(text) anumberscode.append(text2) print("Report Numbers:", anumberscode) return anumberscode
91a31ba05df1a88f1c665f7d4dbb1c2d26bb2cc9
3,655,805
def Parse(spec_name, arg_r): # type: (str, args.Reader) -> args._Attributes """Parse argv using a given FlagSpec.""" spec = FLAG_SPEC[spec_name] return args.Parse(spec, arg_r)
9dc2de95e8f9001eff82f16de6e14f51f768306f
3,655,806
def get_path_url(path: PathOrString) -> str: """Convert a local path to a URL Arguments: path {str} -- path to file Returns: str -- URL to file """ path_obj, path_str = get_path_forms(path) if is_supported_scheme(path_str): return build_request(path_str) return path_obj.absolute().as_uri()
812471da77d59cc0f331b5a031282abb5847f054
3,655,807
def process_keyqueue(codes, more_available): """ codes -- list of key codes more_available -- if True then raise MoreInputRequired when in the middle of a character sequence (escape/utf8/wide) and caller will attempt to send more key codes on the next call. returns (list of input, list of remaining key codes). """ code = codes[0] if code >= 32 and code <= 126: key = chr(code) return [key], codes[1:] if code in _keyconv: return [_keyconv[code]], codes[1:] if code >0 and code <27: return ["ctrl %s" % chr(ord('a')+code-1)], codes[1:] if code >27 and code <32: return ["ctrl %s" % chr(ord('A')+code-1)], codes[1:] em = str_util.get_byte_encoding() if (em == 'wide' and code < 256 and within_double_byte(chr(code),0,0)): if not codes[1:]: if more_available: raise MoreInputRequired() if codes[1:] and codes[1] < 256: db = chr(code)+chr(codes[1]) if within_double_byte(db, 0, 1): return [db], codes[2:] if em == 'utf8' and code>127 and code<256: if code & 0xe0 == 0xc0: # 2-byte form need_more = 1 elif code & 0xf0 == 0xe0: # 3-byte form need_more = 2 elif code & 0xf8 == 0xf0: # 4-byte form need_more = 3 else: return ["<%d>"%code], codes[1:] for i in range(need_more): if len(codes)-1 <= i: if more_available: raise MoreInputRequired() else: return ["<%d>"%code], codes[1:] k = codes[i+1] if k>256 or k&0xc0 != 0x80: return ["<%d>"%code], codes[1:] s = bytes3(codes[:need_more+1]) assert isinstance(s, bytes) try: return [s.decode("utf-8")], codes[need_more+1:] except UnicodeDecodeError: return ["<%d>"%code], codes[1:] if code >127 and code <256: key = chr(code) return [key], codes[1:] if code != 27: return ["<%d>"%code], codes[1:] result = input_trie.get(codes[1:], more_available) if result is not None: result, remaining_codes = result return [result], remaining_codes if codes[1:]: # Meta keys -- ESC+Key form run, remaining_codes = process_keyqueue(codes[1:], more_available) if urwid.util.is_mouse_event(run[0]): return ['esc'] + run, remaining_codes if run[0] == "esc" or run[0].find("meta ") >= 0: return ['esc']+run, remaining_codes return ['meta '+run[0]]+run[1:], remaining_codes return ['esc'], codes[1:]
8a49f55ca760853176c319487936c8e93911535e
3,655,808
from typing import Dict from typing import List from typing import Tuple def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]: """ Given labels and a constraint type, returns the allowed transitions. It will additionally include transitions for the start and end states, which are used by the conditional random field. # Parameters constraint_type : `str`, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". labels : `Dict[int, str]`, required A mapping {label_id -> label}. # Returns `List[Tuple[int, int]]` The allowed transitions (from_label_id, to_label_id). """ num_labels = len(labels) start_tag = num_labels end_tag = num_labels + 1 labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")] allowed = [] for from_label_index, from_label in labels_with_boundaries: if from_label in ("START", "END"): from_tag = from_label from_entity = "" else: from_tag = from_label[0] from_entity = from_label[1:] for to_label_index, to_label in labels_with_boundaries: if to_label in ("START", "END"): to_tag = to_label to_entity = "" else: to_tag = to_label[0] to_entity = to_label[1:] if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity): allowed.append((from_label_index, to_label_index)) return allowed
173dd26c17156ecd73ba1181022183b68f158331
3,655,809
def simple_linear(parent = None, element_count=16, element_pitch=7e-3): """1D line of elements, starting at xyz=0, along y, with given element_pitch Parameters ---------- parent : handybeam.world.World the world to give to this array as parent element_count : int count of elements. element_pitch : float distance between elements """ this = TxArray(parent) this.name = 'a line of elements, starting at xyz=0, along y, spaced by {:0.1f}mm'.format(element_pitch*1e3) this.tx_array_element_descriptor = np.zeros((element_count, 16), dtype=np.float32) half_length = (element_count*element_pitch)/2 for array_element_iy in range(element_count): # add an element at that indexed location element_idx = array_element_iy loc_x = 0 loc_y = (array_element_iy-(element_pitch/2)+0.5) * element_pitch - half_length this.tx_array_element_descriptor[element_idx, :] = \ this.generate_tx_array_element(x=loc_x, y=loc_y, amplitude_ratio_setting=1.0) return this
7cb7a2f5de6ea4ecbe0a67ca8f383bae2bd0f5b0
3,655,812
def for_all_methods(decorator, exclude_methods=None): """ Class decorator """ if exclude_methods is None: exclude_methods = [] def decorate(cls): for attr in cls.__dict__: if ( callable(getattr(cls, attr)) and attr not in DO_NOT_DECORATE_METHODS and attr not in exclude_methods ): setattr(cls, attr, decorator(getattr(cls, attr))) return cls return decorate
6a24961ebd512a20f3b0cad9c3657fa6ff5997ea
3,655,813
def _kuramoto_sivashinsky_old(dimensions, system_size, dt, time_steps): """ This function INCORRECTLY simulates the Kuramoto–Sivashinsky PDE It is kept here only for historical reasons. DO NOT USE UNLESS YOU WANT INCORRECT RESULTS Even though it doesn't use the RK4 algorithm, it is bundled with the other simulation functions in simulate_trajectory() for consistency. Reference for the numerical integration: "fourth order time stepping for stiff pde-kassam trefethen 2005" at https://people.maths.ox.ac.uk/trefethen/publication/PDF/2005_111.pdf Python implementation at: https://github.com/E-Renshaw/kuramoto-sivashinsky Args: dimensions (int): nr. of dimensions of the system grid system_size (int): physical size of the system dt (float): time step size time_steps (int): nr. of time steps to simulate Returns: (np.ndarray): simulated trajectory of shape (time_steps, dimensions) """ n = dimensions # No. of grid points in real space (and hence dimensionality of the output) size = system_size # # Define initial conditions and Fourier Transform them x = np.transpose(np.conj(np.arange(1, n + 1))) / n u = np.cos(2 * np.pi * x / size) * (1 + np.sin(2 * np.pi * x / size)) v = np.fft.fft(u) h = dt # time step nmax = time_steps # No. of time steps to simulate # Wave numbers k = np.transpose( np.conj(np.concatenate((np.arange(0, n / 2), np.array([0]), np.arange(-n / 2 + 1, 0))))) * 2 * np.pi / size # Just copied from the paper, it works L = k ** 2 - k ** 4 E = np.exp(h * L) E_2 = np.exp(h * L / 2) M = 16 # M = (size * np.pi) //2 r = np.exp(1j * np.pi * (np.arange(1, M + 1) - 0.5) / M) LR = h * np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], n, axis=0) Q = h * np.real(np.mean((np.exp(LR / 2) - 1) / LR, axis=1)) f1 = h * np.real(np.mean((-4 - LR + np.exp(LR) * (4 - 3 * LR + LR ** 2)) / LR ** 3, axis=1)) f2 = h * np.real(np.mean((2 + LR + np.exp(LR) * (-2 + LR)) / LR ** 3, axis=1)) f3 = h * np.real(np.mean((-4 - 3 * LR - LR ** 2 + np.exp(LR) * (4 - LR)) / LR ** 3, axis=1)) uu = [np.array(u)] # List of Real space solutions, later converted to a np.array g = -0.5j * k # See paper for details for n in range(1, nmax + 1): Nv = g * np.fft.fft(np.real(np.fft.ifft(v)) ** 2) a = E_2 * v + Q * Nv Na = g * np.fft.fft(np.real(np.fft.ifft(a)) ** 2) b = E_2 * v + Q * Na Nb = g * np.fft.fft(np.real(np.fft.ifft(b)) ** 2) c = E_2 * a + Q * (2 * Nb - Nv) Nc = g * np.fft.fft(np.real(np.fft.ifft(c)) ** 2) v = E * v + Nv * f1 + 2 * (Na + Nb) * f2 + Nc * f3 u = np.real(np.fft.ifft(v)) uu.append(np.array(u)) uu = np.array(uu) # print("PDE simulation finished") return uu
3c0158946b1220e0fa56bea201e2ee31d6df51e5
3,655,815
def get_geneids_of_user_entity_ids(cursor, unification_table, user_entity_ids): """ Get the Entrez Gene IDs of targets using their BIANA user entity ids """ query_geneid = ("""SELECT G.value, G.type FROM externalEntityGeneID G, {} U WHERE U.externalEntityID = G.externalEntityID AND U.userEntityID = %s """.format(unification_table)) print('\nRETRIEVING GENE IDS ASSOCIATED TO USER ENTITY IDS...\n') ueid_to_geneid_to_types = {} for ueid in user_entity_ids: cursor.execute(query_geneid, (ueid,)) for row in cursor: geneid, geneid_type = row #print(ueid, geneid, geneid_type) ueid_to_geneid_to_types.setdefault(ueid, {}) ueid_to_geneid_to_types[ueid].setdefault(str(geneid), set()).add(geneid_type.lower()) print('NUMBER OF USER ENTITIES ASSOCIATED WITH GENE IDS: {}'.format(len(ueid_to_geneid_to_types))) return ueid_to_geneid_to_types
bf192c192352da64716ecab6b4523b50fea5cd0f
3,655,816
def int_array_to_hex(iv_array): """ Converts an integer array to a hex string. """ iv_hex = '' for b in iv_array: iv_hex += '{:02x}'.format(b) return iv_hex
f3332b7672a266ad9cae9fc52bc8e1152bcee58b
3,655,819
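A quick illustrative call for the int_array_to_hex snippet above.

# Each byte value becomes two hex characters.
print(int_array_to_hex([0, 15, 255, 16]))  # '000fff10'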
from io import StringIO import logging import tempfile def minimal_sphinx_app( configuration=None, sourcedir=None, with_builder=False, raise_on_warning=False ): """Create a minimal Sphinx environment; loading sphinx roles, directives, etc.""" class MockSphinx(Sphinx): """Minimal sphinx init to load roles and directives.""" def __init__(self, confoverrides=None, srcdir=None, raise_on_warning=False): self.extensions = {} self.registry = SphinxComponentRegistry() self.html_themes = {} self.events = EventManager(self) # logging self.verbosity = 0 self._warncount = 0 self.warningiserror = raise_on_warning self._status = StringIO() self._warning = StringIO() logging.setup(self, self._status, self._warning) self.tags = Tags([]) self.config = Config({}, confoverrides or {}) self.config.pre_init_values() self._init_i18n() for extension in builtin_extensions: self.registry.load_extension(self, extension) # fresh env self.doctreedir = "" self.srcdir = srcdir self.confdir = None self.outdir = "" self.project = Project(srcdir=srcdir, source_suffix={".md": "markdown"}) self.project.docnames = {"mock_docname"} self.env = BuildEnvironment() self.env.setup(self) self.env.temp_data["docname"] = "mock_docname" # Ignore type checkers because we disrespect superclass typing here self.builder = None # type: ignore[assignment] if not with_builder: return # this code is only required for more complex parsing with extensions for extension in self.config.extensions: self.setup_extension(extension) buildername = "dummy" self.preload_builder(buildername) self.config.init_values() self.events.emit("config-inited", self.config) with tempfile.TemporaryDirectory() as tempdir: # creating a builder attempts to make the doctreedir self.doctreedir = tempdir self.builder = self.create_builder(buildername) self.doctreedir = "" app = MockSphinx( confoverrides=configuration, srcdir=sourcedir, raise_on_warning=raise_on_warning ) return app
55c911a16748e61ff3461833e82661314c5ffdca
3,655,820
def calc_Mo_from_M(M, C=C): """ Calculate seismic moment (Mo) from moment magnitude (M) given a scaling law. C is a scaling constant; should be set at 6, but is defined elsewhere in the module so that all functions using it share a value. """ term1 = 3/2. * C * (np.log(2) + np.log(5) ) term2 = 3/2. * M * (np.log(2) + np.log(5) ) Mo = np.exp( term1 + term2) return Mo
f72033100829126a353d7682f449d0ff4cd3efa8
3,655,821
import pathlib def _file_format_from_filename(filename): """Determine file format from its name.""" filename = pathlib.Path(filename).name return _file_formats[filename] if filename in _file_formats else ""
25f90333696491ddd7b522ca2ac24c84a09e8d07
3,655,823
def r2k(value): """ converts temperature in R(degrees Rankine) to K(Kelvins) :param value: temperature in R(degrees Rankine) :return: temperature in K(Kelvins) """ return const.convert_temperature(value, 'R', 'K')
93c3a7ead8b6b15fc141cd6339acedc044dd2c61
3,655,824
from sqlalchemy import select def add_version(project, publication_id): """ Takes "title", "filename", "published", "sort_order", "type" as JSON data "type" denotes version type, 1=base text, 2=other variant Returns "msg" and "version_id" on success, otherwise 40x """ request_data = request.get_json() if not request_data: return jsonify({"msg": "No data provided."}), 400 title = request_data.get("title", None) filename = request_data.get("filename", None) published = request_data.get("published", None) sort_order = request_data.get("sort_order", None) version_type = request_data.get("type", None) publications = get_table("publication") versions = get_table("publication_version") query = select([publications]).where(publications.c.id == int_or_none(publication_id)) connection = db_engine.connect() result = connection.execute(query).fetchone() if result is None: connection.close() return jsonify("No such publication exists."), 404 values = {"publication_id": int(publication_id)} if title is not None: values["name"] = title if filename is not None: values["original_filename"] = filename if published is not None: values["published"] = published if sort_order is not None: values["sort_order"] = sort_order if version_type is not None: values["type"] = version_type insert = versions.insert().values(**values) result = connection.execute(insert) return jsonify({ "msg": "Created new version object.", "version_id": int(result.inserted_primary_key[0]) }), 201
b6887e5d09e54827ed4f5ad50f1c3e404d55e821
3,655,825
import functools def to_decorator(wrapped_func): """ Encapsulates the decorator logic for most common use cases. Expects a wrapped function with compatible type signature to: wrapped_func(func, args, kwargs, *outer_args, **outer_kwargs) Example: @to_decorator def foo(func, args, kwargs): print(func) return func(*args, **kwargs) @foo() def bar(): print(42) """ @functools.wraps(wrapped_func) def arg_wrapper(*outer_args, **outer_kwargs): def decorator(func): @functools.wraps(func) def wrapped(*args, **kwargs): return wrapped_func(func, args, kwargs, *outer_args, **outer_kwargs) return wrapped return decorator return arg_wrapper
d7c9d0e759e59c26b7c5f7b098e15b78314c8860
3,655,826
def _get_unit(my_str): """ Get unit label from suffix """ matches = [my_str.endswith(suffix) for suffix in _known_units] # check to see if unit makes sense if not any(matches): raise KeyError('Unit not recognized <{}>!'.format(my_str)) # pick unit that matches, with prefix matched_unit = [unit for unit,match in zip(_known_units,matches) if match][0] unit_dict = _unit_dict(matched_unit) return matched_unit,unit_dict[my_str]
86cbb00dbd95025fde265461963e45d457d68470
3,655,827
import random def spec_augment(spectrogram, time_mask_para=70, freq_mask_para=20, time_mask_num=2, freq_mask_num=2): """ Provides Augmentation for audio Args: spectrogram, time_mask_para, freq_mask_para, time_mask_num, freq_mask_num spectrogram (torch.Tensor): spectrum time_mask_para (int): Hyper Parameter for Time Masking to limit time masking length freq_mask_para (int): Hyper Parameter for Freq Masking to limit freq masking length time_mask_num (int): how many time-masked area to make freq_mask_num (int): how many freq-masked area to make Returns: feat - **feat**: Augmented feature Reference: 「SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition」Google Brain Team. 2019. https://github.com/DemisEom/SpecAugment/blob/master/SpecAugment/spec_augment_pytorch.py Examples:: Generate spec augmentation from a feature >>> spec_augment(spectrogram, time_mask_para=70, freq_mask_para=20, n_time_mask=2, freq_mask_num=2) Tensor([[ -5.229e+02, 0, ..., -5.229e+02, -5.229e+02], [ 7.105e-15, 0, ..., -7.105e-15, -7.105e-15], ..., [ 0, 0, ..., 0, 0], [ 3.109e-14, 0, ..., 2.931e-14, 2.931e-14]]) """ length = spectrogram.size(0) n_mels = spectrogram.size(1) # time mask for _ in range(time_mask_num): t = np.random.uniform(low=0.0, high=time_mask_para) t = int(t) if length - t > 0: t0 = random.randint(0, length - t) spectrogram[t0: t0 + t, :] = 0 # freq mask for _ in range(freq_mask_num): f = np.random.uniform(low=0.0, high=freq_mask_para) f = int(f) f0 = random.randint(0, n_mels - f) spectrogram[:, f0: f0 + f] = 0 return spectrogram
a2f1c669253250a581a555a531db79fb756b91bb
3,655,828
def scale(a: tuple, scalar: float) -> tuple: """Scales the point.""" return a[0] * scalar, a[1] * scalar
9638b8cfbd792c2deb35da304c5c375e0402404e
3,655,829
def parse_env(env): """Parse the given environment and return useful information about it, such as whether it is continuous or not and the size of the action space. """ # Determine whether input is continuous or discrete. Generally, for # discrete actions, we will take the softmax of the output # probabilities and for the continuous we will use the linear output, # rescaled to the action space. action_is_continuous = False action_low = None action_high = None if isinstance(env.action_space, gym.spaces.Discrete): action_size = env.action_space.n else: action_is_continuous = True action_low = env.action_space.low action_high = env.action_space.high action_size = env.action_space.low.shape[0] return action_is_continuous, action_size, action_low, action_high
4f5c97e71b7c1e8a319c28c4c1c26a1b758c731b
3,655,830
def encode_dataset(dataset, tester, mode="gate"): """ dataset: object from the `word-embeddings-benchmarks` repo dataset.X: a list of lists of pairs of word dataset.y: similarity between these pairs tester: tester implemented in my `tester.py`""" words_1 = [x[0] for x in dataset["X"]] encoded_words_1 = encode_words( words_1, tester, mode=mode ) encoded_words_2 = encode_words( [x[1] for x in dataset["X"]], tester, mode=mode ) return encoded_words_1, encoded_words_2
726aed93e3cef49f014d44f62e5ff73eae47da43
3,655,831
import csv def readData(filename): """ Read in our data from a CSV file and create a dictionary of records, where the key is a unique record ID and each value is dict """ data_d = {} with open(filename) as f: reader = csv.DictReader(f) for row in reader: clean_row = [(k, preProcess(v)) for (k, v) in row.items()] row_id = str(int(row[fieldNameFileNo])) + '.' + str(int(row[fieldNameIdCol])) data_d[row_id] = dict(clean_row) return data_d
193901c98966f4c0bd2b0e326711b962197ef4da
3,655,834
def update_roi_mask(roi_mask1, roi_mask2): """Y.G. Dec 31, 2016 Update roi_mask1 with roi_mask2 Input: roi_mask1, 2d-array, label array, same shape as xpcs frame, roi_mask2, 2d-array, label array, same shape as xpcs frame, Output: roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 """ roi_mask = roi_mask1.copy() w = np.where(roi_mask2) roi_mask[w] = roi_mask2[w] + np.max(roi_mask) return roi_mask
211d6db69438866ff64c1944fa513ab847d9e641
3,655,836
from typing import List from typing import Tuple from typing import Dict from typing import Any def training_loop( train_sequences: List[Tuple[pd.DataFrame, float]], val_sequences: List[Tuple[pd.DataFrame, float]], test_sequences: List[Tuple[pd.DataFrame, float]], parameters: Dict[str, Any], dir_path: str, ): """ Training loop for the LSTM model. Parameters ---------- train_sequences: List[Tuple[pd.DataFrame, float]] List of training sequences. val_sequences: List[Tuple[pd.DataFrame, float]] List of validation sequences. test_sequences: List[Tuple[pd.DataFrame, float]] List of test sequences. parameters: Dict[str, Any] Hyperparameters for the model. dir_path: str Path to the directory where the model will be saved. """ seed_everything(42, workers=True) logger = WandbLogger(project=parameters["wandb_project"]) gpu_value = 1 if parameters["run_on_gpu"] is True else 0 model = PricePredictor( batch_size=parameters["train_batch_size"], dropout_rate=parameters["dropout_rate"], hidden_size=parameters["hidden_size"], learning_rate=parameters["learning_rate"], number_of_features=parameters["number_of_features"], number_of_layers=parameters["number_of_layers"], run_on_gpu=parameters["run_on_gpu"], ) data_module = LSTMDataLoader( train_sequences=train_sequences, val_sequences=val_sequences, test_sequences=test_sequences, train_batch_size=parameters["train_batch_size"], val_batch_size=parameters["val_batch_size"], train_workers=parameters["train_workers"], val_workers=parameters["val_workers"], ) checkpoint_callback = callbacks.ModelCheckpoint( dirpath=dir_path, save_top_k=1, verbose=True, monitor="valid/loss", mode="min", ) early_stopping_callback = callbacks.EarlyStopping( monitor="valid/loss", patience=2, verbose=True, mode="min", ) trainer = Trainer( max_epochs=parameters["max_epochs"], logger=logger, callbacks=[checkpoint_callback, early_stopping_callback], gpus=gpu_value, log_every_n_steps=parameters["log_n_steps"], progress_bar_refresh_rate=10, deterministic=True, ) trainer.fit(model, data_module) trainer.test(model, data_module) return {"training_done": True}
27b02630173d972a83c82140e0d2c6c957266fa4
3,655,838
def nmi(X, y): """ Normalized mutual information between X and y. :param X: :param y: """ mi = mutual_info_regression(X, y) return mi / mi.max()
5da09b9395883f9b197b2c2add7850d0e1870c44
3,655,839
from typing import Optional import re def attribute_as_str(path: str, name: str) -> Optional[str]: """Return the two-digit number found behind --[A-Z] in path. If several matches are found, the last one is returned. Parameters ---------- path : string String with path of file/folder to get attribute from. name : string Name of attribute to get. Should be A-Z or a-z (implicitly converted to uppercase). Returns ------- string Returns the two-digit number found in path behind --name. """ matches = re.findall("--" + name.upper() + "([0-9]{2})", path) if matches: return str(matches[-1]) return None
257fec03ca911c703e5e06994477cf0b3b75a2ae
3,655,840
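An illustrative call for the attribute_as_str snippet above; the paths are made up for the example.

# The last two-digit group behind --O wins; None is returned when there is no match.
print(attribute_as_str("/data/exp--O01/run--O23/img.tif", "o"))  # '23'
print(attribute_as_str("/data/exp/run/img.tif", "o"))            # None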
def validate_inputs(input_data: pd.DataFrame) -> pd.DataFrame: """Check model inputs for unprocessable values.""" validated_data = input_data.copy() # check for numerical variables with NA not seen during training return validated_data
65087650e9a5e85c3a362a5e27f82bf5f27a1f59
3,655,842
def index(): """ Check if user is authenticated and render index page Or login page """ if current_user.is_authenticated: user_id = current_user._uid return render_template('index.html', score=get_score(user_id), username=get_username(user_id)) else: return redirect(url_for('login'))
edb8ad552ab34640fc030250659ebd05027712fa
3,655,843
from typing import Iterable from typing import Callable from typing import Optional def pick( seq: Iterable[_T], func: Callable[[_T], float], maxobj: Optional[_T] = None ) -> Optional[_T]: """Picks the object obj where func(obj) has the highest value.""" maxscore = None for obj in seq: score = func(obj) if maxscore is None or maxscore < score: (maxscore, maxobj) = (score, obj) return maxobj
7f29c3aef5086957a1b1bd97f086a6ba6fb22cfd
3,655,844
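A short sketch of the pick snippet above, choosing the item that maximizes a scoring function.

# Picks the longest word; the maxobj default is returned for an empty sequence.
print(pick(["ant", "beetle", "bee"], len))  # 'beetle'
print(pick([], len, maxobj="fallback"))     # 'fallback'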
def rsync_public_key(server_list): """ Push the public key to the servers :return: only the successful pushes are returned; failures are written straight to the error log """ # server_list = [('47.100.231.147', 22, 'root', '-----BEGIN RSA PRIVATE KEYxxxxxEND RSA PRIVATE KEY-----', 'false')] ins_log.read_log('info', 'rsync public key to server') rsync_error_list = [] rsync_sucess_list = [] sync_key_obj = RsyncPublicKey() check = sync_key_obj.check_rsa() if check: res_data = start_rsync(server_list) if not res_data.get('status'): rsync_error_list.append(res_data) else: rsync_sucess_list.append(res_data) if rsync_error_list: write_error_log(rsync_error_list) return rsync_sucess_list
94c9941e3f63caf15b0df8c19dc91ee54d002316
3,655,845
import re from bs4 import BeautifulSoup def create_one(url, alias=None): """ Shortens a URL using the TinyURL API. """ if url != '' and url is not None: regex = re.compile(pattern) searchres = regex.search(url) if searchres is not None: if alias is not None: if alias != '': payload = { 'url': url, 'submit': 'Make TinyURL!', 'alias': alias } data = parse_helper.urlencode(payload) full_url = API_CREATE_LIST[1] + data ret = request_helper.urlopen(full_url) soup = BeautifulSoup(ret, 'html.parser') check_error = soup.p.b.string if 'The custom alias' in check_error: raise errors.AliasUsed( "The given Alias you have provided is already" " being used.") else: return soup.find_all( 'div', {'class': 'indent'} )[1].b.string else: raise errors.InvalidAlias( "The given Alias cannot be 'empty'.") else: url_data = parse_helper.urlencode(dict(url=url)) byte_data = str.encode(url_data) ret = request_helper.urlopen( API_CREATE_LIST[0], data=byte_data).read() result = str(ret).replace('b', '').replace("\'", '') return result else: raise errors.InvalidURL("The given URL is invalid.") else: raise errors.URLError("The given URL Cannot be 'empty'.")
a543c23bc694fe09bae3bb4d59802fa6a5c3897d
3,655,846
def duplicate_each_element(vector: tf.Tensor, repeat: int): """This method takes a vector and duplicates each element the number of times supplied.""" height = tf.shape(vector)[0] exp_vector = tf.expand_dims(vector, 1) tiled_states = tf.tile(exp_vector, [1, repeat]) mod_vector = tf.reshape(tiled_states, [repeat * height]) return mod_vector
5b8ea4307d5779929def59805bc5210d8e948a4d
3,655,847
def apk(actual, predicted, k=3): """ Computes the average precision at k. This function computes the average precision at k for single predictions. Parameters ---------- actual : int The true label predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists """ if len(predicted) > k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i, p in enumerate(predicted): if p == actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) return score
27c8d1d03f5fe571f89378d1beb60cde9d82f27e
3,655,848
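A worked example for the apk snippet above: the score is the precision at the rank of the first (and only) hit.

print(apk(actual=7, predicted=[3, 7, 1], k=3))  # 0.5 (true label at rank 2 -> 1/2)
print(apk(actual=7, predicted=[7, 3, 1], k=3))  # 1.0 (true label at rank 1)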
def make_predictor(model): """ Factory to build predictor based on model type provided Args: model (DeployedModel): model to use when instantiating a predictor Returns: BasePredictor Child: instantiated predictor object """ verify = False if model.example == '' else True if model.model_type == ModelType.vw: return VWPredictor(model=model, verify_on_load=verify) elif model.model_type == ModelType.sklearn: return SKLearnPredictor(model=model, sep=app.config.get('SKLEARN_SEPARATOR', None), verify_on_load=verify) else: raise ApiException(name='Invalid Input', message='unknown model type: {type}'.format(type=model.model_type))
87a89d179c28e971a3c29946e94105542686510e
3,655,849
def get(identifier: str) -> RewardScheme: """Gets the `RewardScheme` that matches with the identifier. Arguments: identifier: The identifier for the `RewardScheme` Raises: KeyError: if identifier is not associated with any `RewardScheme` """ if identifier not in _registry.keys(): raise KeyError( 'Identifier {} is not associated with any `RewardScheme`.'.format(identifier)) return _registry[identifier]()
574126cab1a1c1bd10ca2ada1fe626ba66910b11
3,655,850
def add_query_params(url: str, query_params: dict) -> str: """Add query params dict to a given url (which can already contain some query parameters).""" path_result = parse.urlsplit(url) base_url = path_result.path # parse existing query parameters if any existing_query_params = dict(parse.parse_qsl(path_result.query)) all_query_params = {**existing_query_params, **query_params} # add query parameters to url if any if all_query_params: base_url += "?" + parse.urlencode(all_query_params) return base_url
8ea28c2492343e0f7af3bac5d44751827dd6b7aa
3,655,851
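An illustrative call for the add_query_params snippet above; it assumes `from urllib import parse` is in scope, as the snippet implies.

# Existing query parameters are parsed, merged with the new ones, and re-encoded.
print(add_query_params("/search?q=cats", {"page": 2, "sort": "asc"}))
# '/search?q=cats&page=2&sort=asc'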
def try_get_mark(obj, mark_name): """Tries getting a specific mark by name from an object, returning None if no such mark is found """ marks = get_marks(obj) if marks is None: return None return marks.get(mark_name, None)
1dd8b9635d836bbce16e795900d7ea9d154e5876
3,655,853
def timedelta_to_seconds(ts): """ Convert the TimedeltaIndex of a pandas.Series into a numpy array of seconds. """ seconds = ts.index.values.astype(float) seconds -= seconds[-1] seconds /= 1e9 return seconds
4565d7a691e8ac004d9d529568db0d032a56d088
3,655,854
def parse_gage(s): """Parse a streamgage key-value pair. Parse a streamgage key-value pair, separated by '='; that's the reverse of ShellArgs. On the command line (argparse) a declaration will typically look like:: foo=hello or foo="hello world" :param s: str :rtype: tuple(key, value) """ # Adapted from: https://gist.github.com/fralau/061a4f6c13251367ef1d9a9a99fb3e8d items = s.split('=') key = items[0].strip() # we remove blanks around keys, as is logical value = '' if len(items) > 1: # rejoin the rest: value = '='.join(items[1:]) return key, value
299b47f3a4757c924620bdc05e74f195a4cb7967
3,655,855
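A few illustrative calls for the parse_gage snippet above.

print(parse_gage("foo=hello"))  # ('foo', 'hello')
print(parse_gage("foo=a=b"))    # ('foo', 'a=b')   everything after the first '=' is rejoined
print(parse_gage("bare_key"))   # ('bare_key', '')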
from typing import Mapping def get_attribute(instance, attrs): """ Similar to Python's built in `getattr(instance, attr)`, but takes a list of nested attributes, instead of a single attribute. Also accepts either attribute lookup on objects or dictionary lookups. """ for attr in attrs: try: # pylint: disable=isinstance-second-argument-not-valid-type if isinstance(instance, Mapping): instance = instance[attr] else: instance = getattr(instance, attr) except ObjectDoesNotExist: return None return instance
121ef8d4b0b6b69fda1591e2f372a4cf9ec60129
3,655,856
from datetime import datetime def get_log_line_components(s_line): """ given a log line, returns its datetime as a datetime object and its log level as a string and the message itself as another string - those three are returned as a tuple. the log level is returned as a single character (first character of the level's name, capitalized). """ try: dtime = datetime.strptime(s_line[0:19], "%Y-%m-%d %H:%M:%S") except ValueError: raise LogUtilsError("Not a proper date/time at start of log line!") if dtime is None: raise LogUtilsError("Not a proper date/time at start of log line!") log_level = s_line[24] if log_level == "D": s_line = s_line[30:] elif log_level == "I": s_line = s_line[29:] elif log_level == "W": s_line = s_line[32:] elif log_level == "E": s_line = s_line[30:] elif log_level == "C": s_line = s_line[33:] else: raise LogUtilsError("log-level not in log line!") return s_line, dtime, log_level
3ec7e5418f39a579ce8b71f3c51a8e1356cb5291
3,655,858
def is_eligible_for_bulletpoint_vote(recipient, voter): """ Returns True if the recipient is eligible to receive an award. Checks to ensure recipient is not also the voter. """ if voter is None: return True return (recipient != voter) and is_eligible_user(recipient)
20d34076c92b7fd9474a7cf4edf7ca38ad3ffba5
3,655,859
def _get_in_collection_filter_directive(input_filter_name): """Create a @filter directive with in_collection operation and the desired variable name.""" return DirectiveNode( name=NameNode(value=FilterDirective.name), arguments=[ ArgumentNode( name=NameNode(value="op_name"), value=StringValueNode(value="in_collection"), ), ArgumentNode( name=NameNode(value="value"), value=ListValueNode( values=[ StringValueNode(value="$" + input_filter_name), ], ), ), ], )
3c8b18314aa415d6dbec14b63a956e5fdf73aa9d
3,655,860
def load_labelmap(path): """Loads label map proto. Args: path: path to StringIntLabelMap proto text file. Returns: a StringIntLabelMapProto """ with tf.gfile.GFile(path, 'r') as fid: label_map_string = fid.read() label_map = string_int_label_map_pb2.StringIntLabelMap() try: text_format.Merge(label_map_string, label_map) except text_format.ParseError: label_map.ParseFromString(label_map_string) _validate_label_map(label_map) return label_map
3ec29d2dc8fc4bacde5f0dfa49465676f5e8c44c
3,655,861
def calc_internal_hours(entries): """ Calculates internal utilizable hours from an array of entry dictionaries """ internal_hours = 0.0 for entry in entries: if entry['project_name'][:22] == "TTS Acq / Internal Acq" and not entry['billable']: internal_hours = internal_hours + float(entry['hours_spent']) return internal_hours
0962ee49f60ac296668294e6d2f075ce981cbc55
3,655,862
def format_str_strip(form_data, key): """ Return the stripped string value for key in form_data, or '' if the key is missing. """ if key not in form_data: return '' return form_data[key].strip()
44c5aaf8c5e11bfee05971d2961e5dcaf4cd8d9f
3,655,863
def get_element(element_path: str): """ Lets a base extension fetch one of the main window's widgets, events or functions.\n Note that the element path must follow the grammar father>attribute>attribute..., e.g. UI_WIDGETS>textViewer """ try: listed_element_path = element_path.split('>') attribute = getattr(top, listed_element_path[0]) for nowAttributeName in listed_element_path[1:]: attribute = getattr(attribute, nowAttributeName) return attribute except Exception as msg: print(msg) return None
041ec89a700018ce5a6883a80a1998d7179c7041
3,655,864
import urllib def gravatar_for_email(email, size=None, rating=None): """ Generates a Gravatar URL for the given email address. Syntax:: {% gravatar_for_email <email> [size] [rating] %} Example:: {% gravatar_for_email [email protected] 48 pg %} """ gravatar_url = "%savatar/%s" % (GRAVATAR_URL_PREFIX, _get_gravatar_id(email)) parameters = [p for p in ( ('d', GRAVATAR_DEFAULT_IMAGE), ('s', size or GRAVATAR_DEFAULT_SIZE), ('r', rating or GRAVATAR_DEFAULT_RATING), ) if p[1]] if parameters: gravatar_url += '?' + urllib.urlencode(parameters, doseq=True) return gravatar_url
73f3eed5ea073cd4bf6e4a978983c4ed12cedcd6
3,655,865
def decmin_to_decdeg(pos, decimals=4): """Convert degrees and decimal minutes into decimal degrees.""" pos = float(pos) output = np.floor(pos / 100.) + (pos % 100) / 60. return round_value(output, nr_decimals=decimals)
de6490ce5278090b90f87adab57fe8b912307e2c
3,655,866
def get_selector(selector_list, identifiers, specified_workflow=None): """ Determine the correct workflow selector from a list of selectors, series of identifiers and user specified workflow if defined. Parameters ---------- selector_list list List of dictionaries, where the values of all dictionaries are workflow selectors. identifiers list List of identifiers specified in order of precedence that are to be looked up in selector_list. specified_workflow str User specified workflow for build. Returns ------- selector(BasicWorkflowSelector) selector object which can specify a workflow configuration that can be passed to `aws-lambda-builders` """ # Create a combined view of all the selectors all_selectors = {} for selector in selector_list: all_selectors = {**all_selectors, **selector} # Check for specified workflow being supported at all and if it's not, raise an UnsupportedBuilderException. if specified_workflow and specified_workflow not in all_selectors: raise UnsupportedBuilderException("'{}' does not have a supported builder".format(specified_workflow)) # Loop through all identifiers to gather list of selectors with potential matches. selectors = [all_selectors.get(identifier, None) for identifier in identifiers] # Initialize a `None` selector. selector = None try: # Find first non-None selector. # Return the first selector with a match. selector = next(_selector for _selector in selectors if _selector) except StopIteration: pass return selector
f458a82d2d0e81070eefabd490127567a1b67bbb
3,655,867
import functools import inspect import re def register_pth_hook(fname, func=None): """ :: # Add a pth hook. @setup.register_pth_hook("hook_name.pth") def _hook(): '''hook contents.''' """ if func is None: return functools.partial(register_pth_hook, fname) source = inspect.getsource(func) if not re.match( rf"@setup\.register_pth_hook.*\ndef {re.escape(func.__name__)}\(", source): raise SyntaxError("register_pth_hook must be used as a toplevel " "decorator to a function") _, source = source.split("\n", 1) _pth_hook_mixin._pth_hooks.append((fname, func.__name__, source))
1090d4601e0d51ec4c7761bb070318f906c23f87
3,655,868
def callable_or_raise(obj): """Check that an object is callable, else raise a :exc:`ValueError`. """ if not callable(obj): raise ValueError('Object {0!r} is not callable.'.format(obj)) return obj
cb6dd8c03ea41bb94a8357553b3f3998ffcc0d65
3,655,869
def conv_coef(posture="standing", va=0.1, ta=28.8, tsk=34.0,): """ Calculate convective heat transfer coefficient (hc) [W/K.m2] Parameters ---------- posture : str, optional Select posture from standing, sitting or lying. The default is "standing". va : float or iter, optional Air velocity [m/s]. If iter is input, its length should be 17. The default is 0.1. ta : float or iter, optional Air temperature [oC]. If iter is input, its length should be 17. The default is 28.8. tsk : float or iter, optional Skin temperature [oC]. If iter is input, its length should be 17. The default is 34.0. Returns ------- hc : numpy.ndarray Convective heat transfer coefficient (hc) [W/K.m2]. """ # Natural convection if posture.lower() == "standing": # Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5 hc_natural = np.array([ 4.48, 4.48, 2.97, 2.91, 2.85, 3.61, 3.55, 3.67, 3.61, 3.55, 3.67, 2.80, 2.04, 2.04, 2.80, 2.04, 2.04,]) elif posture.lower() in ["sitting", "sedentary"]: # Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5 hc_natural = np.array([ 4.75, 4.75, 3.12, 2.48, 1.84, 3.76, 3.62, 2.06, 3.76, 3.62, 2.06, 2.98, 2.98, 2.62, 2.98, 2.98, 2.62,]) elif posture.lower() in ["lying", "supine"]: # Kurazumi et al., 2008, https://doi.org/10.20718/jjpa.13.1_17 # The values are applied under cold environment. hc_a = np.array([ 1.105, 1.105, 1.211, 1.211, 1.211, 0.913, 2.081, 2.178, 0.913, 2.081, 2.178, 0.945, 0.385, 0.200, 0.945, 0.385, 0.200,]) hc_b = np.array([ 0.345, 0.345, 0.046, 0.046, 0.046, 0.373, 0.850, 0.297, 0.373, 0.850, 0.297, 0.447, 0.580, 0.966, 0.447, 0.580, 0.966,]) hc_natural = hc_a * (abs(ta - tsk) ** hc_b) # Forced convection # Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5 hc_a = np.array([ 15.0, 15.0, 11.0, 17.0, 13.0, 17.0, 17.0, 20.0, 17.0, 17.0, 20.0, 14.0, 15.8, 15.1, 14.0, 15.8, 15.1,]) hc_b = np.array([ 0.62, 0.62, 0.67, 0.49, 0.60, 0.59, 0.61, 0.60, 0.59, 0.61, 0.60, 0.61, 0.74, 0.62, 0.61, 0.74, 0.62,]) hc_forced = hc_a * (va ** hc_b) # Select natural or forced hc. # If local va is under 0.2 m/s, the hc valuse is natural. hc = np.where(va<0.2, hc_natural, hc_forced) # hc [W/K.m2)] return hc
d351b82d2ffb81396b4e0ce2f05b429cb79ac28c
3,655,871
def _one_formula(lex, fmt, varname, nvars): """Return one DIMACS SAT formula.""" f = _sat_formula(lex, fmt, varname, nvars) _expect_token(lex, {RPAREN}) return f
166c73c6214a0f6e3e6267804d2dd5c16b43a652
3,655,872
def _split_variables(variables): """Split variables into always passed (std) and specified (file). We always pass some variables to each step but need to explicitly define file and algorithm variables so they can be linked in as needed. """ file_vs = [] std_vs = [] for v in variables: cur_type = v["type"] while isinstance(cur_type, dict): if "items" in cur_type: cur_type = cur_type["items"] else: cur_type = cur_type["type"] if (cur_type in ["File", "null", "record"] or (isinstance(cur_type, (list, tuple)) and ("File" in cur_type or {'items': 'File', 'type': 'array'} in cur_type))): file_vs.append(v) elif v["id"] in ALWAYS_AVAILABLE: std_vs.append(v) else: file_vs.append(v) return file_vs, std_vs
2b297bf99153256769d42c3669f3f8f29da95b70
3,655,873
import numpy def percentiles_fn(data, columns, values=[0.0, 0.25, 0.5, 0.75, 1.0], remove_missing=False): """ Task: Get the data values corresponding to the percentile chosen at the "values" (array of percentiles) after sorting the data. return -1 if no data was found :param data: data structure for partitioning :type data: numpy.ndarray :param columns: columns or variable names of the data to be used :type columns: str array :param values: percentile values to be processed :type values: float array :param remove_missing: flag to remove missing values :type remove_missing: boolean """ result = -1 n_elements = data[columns[0]].shape[0] if n_elements <= 0: return result if remove_missing: data = nomi(data, columns) n_elements = data[columns[0]].shape[0] values = numpy.array(values) if max(values) > 1.0: values = values * 0.01 #### Get an array of indices of the sorted data sorted_index_arr = numpy.argsort(data[columns[0]]) ind = None #### Iterate through each percentile and get the corresponding #### value at that percentile of the sorted data for i in range(len(values)): if (values[i] < 0.0) or (values[i] > 1.0): return -1 #### Setting ind to the percentile wanted if values[i] <= 0.5: ind = int(values[i] * n_elements) else: ind = int(values[i] * (n_elements + 1)) if ind >= n_elements: ind = n_elements - int(1) if i == 0: result = data[columns[0]][sorted_index_arr[ind]] else: result = numpy.append(result, data[columns[0]][sorted_index_arr[ind]]) return result
cfacd575e3e1f8183b1e82512859198a973a1f85
3,655,874
def base_checkout_total( subtotal: TaxedMoney, shipping_price: TaxedMoney, discount: Money, currency: str, ) -> TaxedMoney: """Return the total cost of the checkout.""" zero = zero_taxed_money(currency) total = subtotal + shipping_price - discount # Discount is subtracted from both gross and net values, which may cause negative # net value if we are having a discount that covers whole price. # Comparing TaxedMoney objects works only on gross values. That is why we are # explicitly returning zero_taxed_money if total.gross is less or equal zero. if total.gross <= zero.gross: return zero return total
04017f67249b2415779b8a7bbfa854653ec6c285
3,655,875
def if_statement(lhs='x', op='is', rhs=0, _then=None, _else=None): """Celery Script if statement. Kind: _if Arguments: lhs (left-hand side) op (operator) rhs (right-hand side) _then (id of sequence to execute on `then`) _else (id of sequence to execute on `else`) """ args = {} args['lhs'] = lhs args['op'] = op args['rhs'] = rhs if _then is None: _then_kind = 'nothing' _then_args = {} else: _then_kind = 'execute' _then_args = {"sequence_id": _then} if _else is None: _else_kind = 'nothing' _else_args = {} else: _else_kind = 'execute' _else_args = {"sequence_id": _else} args['_then'] = create_node(kind=_then_kind, args=_then_args) args['_else'] = create_node(kind=_else_kind, args=_else_args) _if_statement = create_node(kind='_if', args=args) return _if_statement
c42baa0933be08e89049894acfd3c003832331db
3,655,876
def add_next_open(df, col='next_open'): """ Find the open price of the next candlestick """ df[col] = df[CANDLE_OPEN_COLUMN].shift(-1) df[col].fillna(value=df[CANDLE_CLOSE_COLUMN], inplace=True) return df
185fdd87b437546be63548506adef7bb56c4aa5d
3,655,877
def seasons_used(parameters): """ Get a list of the seasons used for this set of parameters. """ seasons_used = set([s for p in parameters for s in p.seasons]) # Make sure this list is ordered by SEASONS. return [season for season in SEASONS if season in seasons_used]
641e0b4dd01bd30bf9129a9302ad5935a614588f
3,655,878
def get_polyphyletic(cons): """get polyphyletic groups and a representative tip""" tips, taxonstrings = unzip(cons.items()) tree, lookup = make_consensus_tree(taxonstrings, False, tips=tips) cache_tipnames(tree) names = {} for n in tree.non_tips(): if n.name is None: continue if (n.name, n.Rank) not in names: names[(n.name, n.Rank)] = {} if n.parent is not None: names[(n.name, n.Rank)][n.parent.name] = n.tip_names[0] return names
b53a50170b3546f8228aa82013545148918155b7
3,655,879
from typing import Tuple from typing import cast def find_closest_integer_in_ref_arr(query_int: int, ref_arr: NDArrayInt) -> Tuple[int, int]: """Find the closest integer to any integer inside a reference array, and the corresponding difference. In our use case, the query integer represents a nanosecond-discretized timestamp, and the reference array represents a numpy array of nanosecond-discretized timestamps. Instead of sorting the whole array of timestamp differences, we just take the minimum value (to speed up this function). Args: query_int: query integer, ref_arr: Numpy array of integers Returns: integer, representing the closest integer found in a reference array to a query integer, representing the integer difference between the match and query integers """ closest_ind = np.argmin(np.absolute(ref_arr - query_int)) closest_int = cast(int, ref_arr[closest_ind]) # mypy does not understand numpy arrays int_diff = np.absolute(query_int - closest_int) return closest_int, int_diff
9d0e43d869b94008fb51b1281041538a85d48d7e
3,655,880
def saver_for_file(filename): """ Returns a Saver that can load the specified file, based on the file extension. None if failed to determine. :param filename: the filename to get the saver for :type filename: str :return: the associated saver instance or None if none found :rtype: Saver """ saver = javabridge.static_call( "weka/core/converters/ConverterUtils", "getSaverForFile", "(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename) if saver is None: return None else: return Saver(jobject=saver)
0838a46be5a282849fdf48584e9a8e971b7ef966
3,655,881
def make(context, name): """Create an object in a registered table class. This function will be stored in that object, so that the new table object is able to create new table objects in its class. !!! hint This is needed when the user wants to insert new records in the table. Parameters ---------- context: object The context singleton in which this very function will be stored under attribute `mkTable`. name: string The registered name of the derived table class. """ tableObj = factory(name)(context, name) tableObj.mkTable = make return tableObj
2b87aa461f97c1d1e1c6ff9a8c6d4128d8eccbb3
3,655,882
def cofilter(function, iterator): """ Return items in iterator for which `function(item)` returns True. """ results = [] def checkFilter(notfiltered, item): if notfiltered == True: results.append(item) def dofilter(item): d = maybeDeferred(function, item) d.addCallback(checkFilter, item) return d d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator) d.addCallback(lambda _: results) return d
0c14ce3310e1f1a2984b1faf5be21c552ca65b43
3,655,883
def download_dataset(dataset_name='mnist'): """ Load MNIST dataset using keras convenience function Args: dataset_name (str): which of the keras datasets to download dtype (np.dtype): Type of numpy array Returns tuple[np.array[float]]: (train images, train labels), (test images, test labels) """ if dataset_name == 'mnist': return tf.keras.datasets.mnist.load_data() elif dataset_name == 'binarised_mnist': return load_binarised_mnist_data()
c4bda5981acaf1907d46724f217012bf9349e9da
3,655,884
from typing import Type from textwrap import dedent def create_trigger_function_sql( *, audit_logged_model: Type[Model], context_model: Type[Model], log_entry_model: Type[Model], ) -> str: """ Generate the SQL to create the function to log the SQL. """ trigger_function_name = f"{ audit_logged_model._meta.db_table }_log_change" context_table_name = context_model._meta.db_table # noqa context_fields = ", ".join( field.column for field in context_model._meta.get_fields() # noqa if isinstance(field, Field) and not isinstance(field, AutoField) ) log_entry_table_name = log_entry_model._meta.db_table return dedent( f""" CREATE FUNCTION { trigger_function_name }() RETURNS TRIGGER AS $$ DECLARE -- Id of the inserted row, used to ensure exactly one row is inserted entry_id int; content_type_id int; BEGIN SELECT id INTO STRICT content_type_id FROM django_content_type WHERE app_label = '{ audit_logged_model._meta.app_label }' AND model = '{ audit_logged_model._meta.model_name }'; IF (TG_OP = 'INSERT') THEN INSERT INTO { log_entry_table_name } ( { context_fields }, action, at, changes, content_type_id, object_id ) SELECT { context_fields }, TG_OP as action, now() as at, to_jsonb(NEW.*) as changes, content_type_id, NEW.id as object_id -- We rely on this table being created by out Django middleware FROM { context_table_name } -- We return the id into the variable to make postgresql check -- that exactly one row is inserted. RETURNING id INTO STRICT entry_id; RETURN NEW; ELSIF (TG_OP = 'UPDATE') THEN INSERT INTO { log_entry_table_name } ( { context_fields }, action, at, changes, content_type_id, object_id ) SELECT { context_fields }, TG_OP as action, now() as at, ( SELECT -- Aggregate back to a single jsonb object, with -- column name as key and the two values in an array. jsonb_object_agg( COALESCE(old_row.key, new_row.key), ARRAY[old_row.value, new_row.value] ) FROM -- Select key value pairs from the old and the new -- row, and then join them on the key. THis gives -- us rows with the same key and values from both -- the old row and the new row. jsonb_each(to_jsonb(OLD.*)) old_row FULL OUTER JOIN jsonb_each(to_jsonb(NEW.*)) new_row ON old_row.key = new_row.key WHERE -- Only select rows that have actually changed old_row.* IS DISTINCT FROM new_row.* ) as changes, content_type_id, NEW.id as object_id -- We rely on this table being created by out Django middleware FROM { context_table_name } -- We return the id into the variable to make postgresql check -- that exactly one row is inserted. RETURNING id INTO STRICT entry_id; RETURN NEW; ELSIF (TG_OP = 'DELETE') THEN INSERT INTO { log_entry_table_name } ( { context_fields }, action, at, changes, content_type_id, object_id ) SELECT { context_fields }, TG_OP as action, now() as at, to_jsonb(OLD.*) as changes, content_type_id, OLD.id as object_id -- We rely on this table being created by out Django middleware FROM { context_table_name } -- We return the id into the variable to make postgresql check -- that exactly one row is inserted. RETURNING id INTO STRICT entry_id; RETURN NEW; END IF; END; $$ language 'plpgsql'; """ )
696443cee7752b74542d259d4a223f419462d18f
3,655,886
import numpy as np


def reorder_by_first(*arrays):
    """ Applies the same permutation to all passed arrays,
    permutation sorts the first passed array """
    # check_arrays is assumed to be a helper from the surrounding library that
    # validates the inputs and returns them as numpy arrays
    arrays = check_arrays(*arrays)
    order = np.argsort(arrays[0])
    return [arr[order] for arr in arrays]
bd9e60cadba4644b06ae55396c7dcae33f1fa1d0
3,655,887
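A small usage sketch, assuming check_arrays simply validates the inputs and hands them back as numpy arrays:

import numpy as np

# Hypothetical example: both arrays are permuted by the sort order of the first one.
ages = np.array([30, 20, 40])
names = np.array(['bob', 'alice', 'carol'])
sorted_ages, sorted_names = reorder_by_first(ages, names)
# sorted_ages  -> array([20, 30, 40])
# sorted_names -> array(['alice', 'bob', 'carol'], dtype='<U5')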
def embedding_weights(mesh, vocab_dim, output_dim, variable_dtype, name="embedding", ensemble_dim=None, initializer=None): """Embedding weights.""" if not ensemble_dim: ensemble_dim = [] elif not isinstance(ensemble_dim, list): ensemble_dim = [ensemble_dim] shape = mtf.Shape(ensemble_dim) + [vocab_dim, output_dim] if initializer is None: initializer = tf.random_normal_initializer() ret = mtf.get_variable( mesh, name, shape, dtype=variable_dtype, initializer=initializer) return ret
b89d5a411757d704c57baff6e4a74b7a5807c381
3,655,888
import numpy as np


def generiraj_emso(zenska):
    """Generate an EMSO number (the Slovenian unique personal ID)"""
    rojstvo = random_date_generator(julijana_zakrajsek)
    # Drop the first digit of the year
    emso_stevke = rojstvo[:4] + rojstvo[5:]
    if zenska:
        # Rather crude simplification of the last three digits: duplicates can occur,
        # and numbers with a leading zero are ignored, ...
        return (emso_stevke + '505' + str(np.random.randint(100, 999)))
    else:
        return (emso_stevke + '500' + str(np.random.randint(100, 999)))
89734021fd0d6f863a309b5c23c0a4ee6d385edf
3,655,889
import numpy as np


def pdf_markov2(x, y, y_offset=1, nlevels=3):
    """
    Compute the empirical joint PDF for two processes of Markov order 2.
    This version is a bit quicker than the more general pdf() function.
    See the docstring for pdf for more info.
    """
    y_offset = bool(y_offset)
    # out = np.ones((nlevels,)*6, np.uint32)
    out = np.zeros((nlevels,) * 5, np.float64)
    n = x.size
    for tt in range(2, x.size):
        # out[x[tt], x[tt - 1], x[tt - 2], y[tt], y[tt - 1], y[tt - 2]] += 1
        # offset signal y by +1 if we want to allow same-timebin interactions
        out[x[tt], x[tt - 1], x[tt - 2],
            y[tt - 1 + y_offset], y[tt - 2 + y_offset]] += 1
    return out / (n - 2.)
6d789d1ef9ff88c27f610e9904bdbc27fbe10e5b
3,655,890
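A quick sanity-check sketch, assuming x and y are integer sequences taking values in {0, ..., nlevels - 1}:

import numpy as np

# Hypothetical example: two random ternary sequences, same-timebin interactions allowed.
rng = np.random.default_rng(0)
x = rng.integers(0, 3, size=1000)
y = rng.integers(0, 3, size=1000)
p = pdf_markov2(x, y, y_offset=1, nlevels=3)
print(p.shape)  # (3, 3, 3, 3, 3)
print(p.sum())  # 1.0, since the counts are normalised by (n - 2)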
def check_if_recipe_skippable(recipe, channels, repodata_dict, actualname_to_idname):
    """
    check_if_recipe_skippable
    =========================
    Method used to check if a recipe should be skipped or not.
     Skip criteria include:
      - If the version of the recipe in the channel repodata is greater than or equal to the query recipe.
      - If the query recipe's version and build are equal to or less than the recipe in the repodata

    Non-Skip Criteria include:
     - Opposite of skip criteria
     - If the recipe is not in any channel

    Parameters:
    -----------
    1) recipe: (str) The directory path to the query recipe
    2) channels: (list) A list of channels to check against
    3) repodata_dict: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict mapping actual recipe names to their id names in the repodata_dict. (From get_repodata() method)

    Returns:
    ++++++++
    - Return True if recipe building is skippable
    - Return False if recipe building cannot be skipped
    """

    platform, metas = load_platform_metas(recipe, finalize=False)

    # The recipe likely defined skip: True
    if not metas:
        return True

    ## Get each package's name, version, and build number
    packages = set(
        (meta.name(), float(meta.version()), float(meta.build_number() or 0))
        for meta in metas
    )

    for name, version, build_num in packages:
        present = False
        for c in channels:

            ## Check for the recipe in one of the channel's repodata
            if name in actualname_to_idname[c].keys():
                ## Find the newest/highest versioned and build package
                present = True

                cur_version = -1.0
                cur_build = -1.0
                for pkg_tar in actualname_to_idname[c][name]:
                    repo_version = float(repodata_dict[c][pkg_tar]["version"])
                    repo_build_number = float(repodata_dict[c][pkg_tar]["build_number"])

                    ## If version is greater than the previous version, reset values with this package
                    if repo_version > cur_version:
                        cur_version = repo_version
                        cur_build = repo_build_number

                    ## If version is the same but the build number is greater, reset values with this package
                    elif repo_version == cur_version and repo_build_number > cur_build:
                        cur_build = repo_build_number

                ## Check whether the query package is newer than what is reported in the repodata
                ## If the query package's version is greater than the best in the repodata, update recipe
                if cur_version < version:
                    return False

                ## If the query package has the same version but a greater build number than the best in the repodata, update recipe
                elif cur_version == version and cur_build < build_num:
                    return False

        ## If package not already in the repodata
        if not present:
            return False

    print(
        ":ggd:build recipes: FILTER: not building recipe {} because the version and/or build number match what is already in the channel and not forced".format(
            recipe
        )
    )
    return True
604fdcf86ec45826f53fd837d165b234e9d11d91
3,655,893
def hello(name=None):
    """Greets a person by name, normalising capitalisation typos so the name is returned with a leading capital letter (Xxxx).

    Args:
        name (str): A person's name.

    Returns:
        str: "Hello, Name!" for the given name, or "Hello, World!" if name is not given (or passed as an empty String).
    """
    return "Hello, World!" if name is None or not name else "Hello, {}!".format(name.title())
f1aafbebd49507fd5417d8752f98ae7d0af8ec33
3,655,895
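A few illustrative calls (plain Python, no extra dependencies):

print(hello())          # Hello, World!
print(hello(''))        # Hello, World!
print(hello('aLiCe'))   # Hello, Alice!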
from sklearn.decomposition import PCA


def computePCA(inputMatrix, n_components=None):
    """Compute Principal Component Analysis (PCA) on the feature space.
    n_components specifies the number of dimensions in the transformed basis to keep."""
    pca_ = PCA(n_components)
    pca_.fit(inputMatrix)
    return pca_
4061f998bfca9ed294b312ae746a63ea0eef8438
3,655,896
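A minimal usage sketch, assuming scikit-learn is installed:

import numpy as np

# Hypothetical example: reduce a random 100x10 feature matrix to 3 principal components.
X = np.random.rand(100, 10)
pca_ = computePCA(X, n_components=3)
print(pca_.explained_variance_ratio_.shape)  # (3,)
print(pca_.transform(X).shape)               # (100, 3)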
def tag(repo, subset, x): """The specified tag by name, or all tagged revisions if no name is given. Pattern matching is supported for `name`. See :hg:`help revisions.patterns`. """ # i18n: "tag" is a keyword args = getargs(x, 0, 1, _("tag takes one or no arguments")) cl = repo.changelog if args: pattern = getstring(args[0], # i18n: "tag" is a keyword _('the argument to tag must be a string')) kind, pattern, matcher = stringutil.stringmatcher(pattern) if kind == 'literal': # avoid resolving all tags tn = repo._tagscache.tags.get(pattern, None) if tn is None: raise error.RepoLookupError(_("tag '%s' does not exist") % pattern) s = {repo[tn].rev()} else: s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)} else: s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'} return subset & s
d4ceadb7ef03ae6ed950c60c7bbf06b4d26f8671
3,655,897
def _embed_json(service, targetid): """ Returns oEmbed JSON for a given URL and service """ return d.http_get(_OEMBED_MAP[service] % (urlquote(targetid),)).json()
347d38e2b4f69c853e8085308e334b7cc778d4ad
3,655,898
import re def is_blank(s): """Returns True if string contains only space characters.""" return re.search(reNonSpace, s) is None
40b4ec62a2882d100b80fd951c6b9e4d31220581
3,655,899
def remove_invalid_chars_from_passage(passage_text):
    """ Return a cleaned passage if the passage contains invalid characters.
        If the passage is already valid, return None """
    # Check if any of the characters are invalid
    bad_chars = [c for c in passage_text if c in INVALID_PASSAGE_CHARACTERS]
    if bad_chars:
        for b in set(bad_chars):
            passage_text = passage_text.replace(b, '')
        return passage_text
5eeac3393477c45ac361fb2ccbae194c83e47f25
3,655,900
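A small illustration; INVALID_PASSAGE_CHARACTERS is not defined in the snippet, so a hypothetical stand-in is substituted here:

INVALID_PASSAGE_CHARACTERS = {'\t', '\x00'}  # hypothetical stand-in for the real constant
print(remove_invalid_chars_from_passage('a\tb'))    # 'ab'
print(remove_invalid_chars_from_passage('clean'))   # None (already valid)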
import locale

import arrow


def fallback_humanize(date, fallback_format=None, use_fallback=False):
    """
    Format date with arrow and a fallback format
    """
    # Convert to local timezone
    date = arrow.get(date).to('local')
    # Set default fallback format
    if not fallback_format:
        fallback_format = '%Y/%m/%d %H:%M:%S'
    # Use the fallback format directly if requested
    if use_fallback:
        return date.datetime.strftime(fallback_format)
    try:
        # Use Arrow's humanize function
        lang, encode = locale.getdefaultlocale()
        clock = date.humanize(locale=lang)
    except Exception:
        # Notify only the first time
        # dg, printNicely and light_magenta are globals provided by the surrounding module
        if not dg['humanize_unsupported']:
            dg['humanize_unsupported'] = True
            printNicely(
                light_magenta('Humanized date display method does not support your $LC_ALL.'))
        # Fallback when LC_ALL is not supported
        clock = date.datetime.strftime(fallback_format)
    return clock
06a758cea23978d877d12cfead25b21140370094
3,655,901
from functools import reduce


def min_column_widths(rows):
    """Computes the minimum column width for the table of strings.
    >>> min_column_widths([["some", "fields"], ["other", "line"]])
    [5, 6]
    """
    def lengths(row):
        return map(len, row)

    def maximums(row1, row2):
        return map(max, row1, row2)

    return reduce(maximums, map(lengths, rows))
36722e4250dde561836c1ea3042b796ed7650986
3,655,904
def entities(address_book): """Get the entities utility.""" return zope.component.getUtility(IEntities)
6c64c5c8b8d0048425dcd91baf265134fbb2e96e
3,655,905
from renku.core.management.migrations.models.v9 import Project import pathlib def generate_dataset_file_url(client, filepath): """Generate url for DatasetFile.""" if not client: return try: if not client.project: return project = client.project except ValueError: metadata_path = client.renku_path.joinpath(OLD_METADATA_PATH) project = Project.from_yaml(metadata_path) project_id = urlparse(project._id) else: project_id = urlparse(project.id) filepath = quote(filepath, safe="/") path = pathlib.posixpath.join(project_id.path, "files", "blob", filepath) project_id = project_id._replace(path=path) return project_id.geturl()
1aa3a97cfff523e0b7d7718c39dfb9935160e193
3,655,906
def _check_attrs(obj): """Checks that a periodic function/method has all the expected attributes. This will return the expected attributes that were **not** found. """ missing_attrs = [] for attr_name in _REQUIRED_ATTRS: if not hasattr(obj, attr_name): missing_attrs.append(attr_name) return missing_attrs
6a3326616aa5d1cd083f99a2e0f4c57f6f5a11c6
3,655,907
def TokenStartBlockElement(block): """ `TokenStartBlockElement` is used to denote that we are starting a new block element. Under most circumstances, this token will not render anything. """ return { "type": "SpaceCharacters", "data": "", "_md_type": mdTokenTypes["TokenStartBlockElement"], "_md_block": block, }
c7690b2ca7babc0cc5d6e36a8b8ecb33ad463294
3,655,908
import json def parse_json(json_path): """ Parse training params json file to python dictionary :param json_path: path to training params json file :return: python dict """ with open(json_path) as f: d = json.load(f) return d
c34b241813996a8245ea8c334de72f0fbffe8a31
3,655,909
from subprocess import call  # shells out to porechop; unittest.mock.call would not execute anything
from Bio.SeqIO import parse  # assumed source of parse(), based on the 'fastq' format strings


def cut_tails(fastq, out_dir, trimm_adapter, trimm_primer, hangF, hangR):
    """
    Trim adapters/barcodes (with porechop) and primer overhangs from reads.

    Parameters
    ----------
    fastq : str
        path to the input fastq file
    out_dir : str
        path to the output directory
    trimm_adapter : bool
        whether to trim barcodes/adapters with porechop
    trimm_primer : bool
        whether to trim primer overhangs from the sequence and quality lines
    hangF : str
        forward primer overhang sequence
    hangR : str
        reverse primer overhang sequence

    Returns
    -------
    str
        path to the trimmed fastq file
    """
    output = fastq
    # cut barcodes
    if trimm_adapter:
        call('porechop -i {} --verbosity 0 -t 100 --require_two_barcodes --extra_end_trim 0 -o {}/trimmed_barcode.fastq'.format(fastq, out_dir), shell=True)
        fastq = out_dir + "/trimmed_barcode.fastq"
        output = out_dir + "/trimmed_barcode.fastq"

    # cut primers
    if trimm_primer:
        opn_fastq = parse(fastq, 'fastq')
        # cut primers from the sequence (line 1) and quality (line 3) of each record
        with open('{}/trimmed_primer.fastq'.format(out_dir), 'w') as trimmed_fasta:
            for record in opn_fastq:
                for idx in range(4):
                    if idx not in (1, 3):
                        trimmed_fasta.write(record.format('fastq').split('\n')[idx] + '\n')
                    else:
                        trimmed_fasta.write(record.format('fastq').split('\n')[idx][len(hangF): -len(hangR)] + '\n')
        output = '{}/trimmed_primer.fastq'.format(out_dir)
    return output
8e4ef0b24d5ecf22aa298a0e4e8cddeb7d681945
3,655,910
def load_spelling(spell_file=SPELLING_FILE): """ Load the term_freq from spell_file """ with open(spell_file, encoding="utf-8") as f: tokens = f.read().split('\n') size = len(tokens) term_freq = {token: size - i for i, token in enumerate(tokens)} return term_freq
236cb5306632990e1eefcf308dea224890ccd035
3,655,912
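A minimal usage sketch with a throwaway vocabulary file (the real SPELLING_FILE path is project specific):

with open('vocab.txt', 'w', encoding='utf-8') as f:
    f.write('the\nof\nand')
print(load_spelling('vocab.txt'))  # {'the': 3, 'of': 2, 'and': 1}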
def NotP8(): """ Return the matroid ``NotP8``. This is a matroid that is not `P_8`, found on page 512 of [Oxl1992]_ (the first edition). EXAMPLES:: sage: M = matroids.named_matroids.P8() sage: N = matroids.named_matroids.NotP8() sage: M.is_isomorphic(N) False sage: M.is_valid() True """ A = Matrix(GF(3), [ [1, 0, 0, 0, 0, 1, 1, -1], [0, 1, 0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 1, 0, 1], [0, 0, 0, 1, -1, 1, 1, 1] ]) M = TernaryMatroid(A, 'abcdefgh') M.rename('NotP8: ' + repr(M)) return M
d475810244338532f4611b120aa15b4776bd2aeb
3,655,913
def eqv(var_inp):
    """Returns the von-mises stress of a Field or FieldsContainer

    Returns
    -------
    field : ansys.dpf.core.Field, ansys.dpf.core.FieldsContainer
        The von-mises stress of this field.  Output type will match input type.
    """
    if isinstance(var_inp, dpf.core.Field):
        return _eqv(var_inp)
    elif isinstance(var_inp, dpf.core.FieldsContainer):
        return _eqv_fc(var_inp)
    # elif isinstance(var_inp, dpf.core.Operator):
    #     return _eqv_op(var_inp)
    else:
        raise TypeError('Input type must be a Field or FieldsContainer')
5977b1317fc5bfa43c796b95680a7b2a21ae4553
3,655,914
def sort(array: list[int]) -> list[int]: """Counting sort implementation. """ result: list[int] = [0, ] * len(array) low: int = min(array) high: int = max(array) count_array: list[int] = [0 for i in range(low, high + 1)] for i in array: count_array[i - low] += 1 for j in range(1, len(count_array)): count_array[j] += count_array[j - 1] for k in reversed(array): result[count_array[k - low] - 1] = k count_array[k - low] -= 1 return result
86864db6e012d5e6afcded3365d6f2ca35a5b94b
3,655,915
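Example runs; negative values work too because indices are offset by the minimum element:

print(sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]
print(sort([-5, 0, -2, 3]))         # [-5, -2, 0, 3]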
def build_updated_figures(
        df, colorscale_name
):
    """
    Build all figures for dashboard

    Args:
        - df: census 2010 dataset (cudf.DataFrame)
        - colorscale_name

    Returns:
        tuple of figures in the following order
        (education_histogram, income_histogram,
        cow_histogram, age_histogram)
    """
    colorscale_transform = 'linear'

    education_histogram = build_histogram_default_bins(
        df, 'education', 'v', colorscale_name, colorscale_transform
    )

    income_histogram = build_histogram_default_bins(
        df, 'income', 'v', colorscale_name, colorscale_transform
    )

    cow_histogram = build_histogram_default_bins(
        df, 'cow', 'v', colorscale_name, colorscale_transform
    )

    age_histogram = build_histogram_default_bins(
        df, 'age', 'v', colorscale_name, colorscale_transform
    )

    return (
        education_histogram, income_histogram,
        cow_histogram, age_histogram,
    )
01dd0f298f662f40919170b4e37c533bd3ba443b
3,655,916
from typing import Optional def get_or_else_optional(optional: Optional[_T], alt_value: _T) -> _T: """ General-purpose getter for `Optional`. If it's `None`, returns the `alt_value`. Otherwise, returns the contents of `optional`. """ if optional is None: return alt_value return optional
340fc67adc9e73d748e3c03bec9d20e1646e894c
3,655,918
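A small illustration of the Optional getter:

from typing import Optional

timeout: Optional[int] = None
print(get_or_else_optional(timeout, 30))  # 30
print(get_or_else_optional(10, 30))       # 10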
def create_dic(udic): """ Create a glue dictionary from a universal dictionary """ return udic
aa854bb8f4d23da7e37aa74727446d7436524fe2
3,655,919