content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def stemmer(stemmed_sent): """ Stems each word of a tokenized sentence with the Porter stemmer """ porter = PorterStemmer() stemmed_sentence = [] for word in literal_eval(stemmed_sent): stemmed_word = porter.stem(word) stemmed_sentence.append(stemmed_word) return stemmed_sentence
96337684deb7846f56acf302d1e0d8c8ab9743dd
3,657,500
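A minimal usage sketch for the stemmer snippet above, not part of the dataset itself: it assumes NLTK's PorterStemmer and ast.literal_eval are the imported names, and that each tokenized sentence is stored as a stringified Python list.

from ast import literal_eval
from nltk.stem.porter import PorterStemmer

def stemmer(stemmed_sent):
    """Stem each word of a tokenized sentence stored as a stringified list."""
    porter = PorterStemmer()
    return [porter.stem(word) for word in literal_eval(stemmed_sent)]

print(stemmer("['running', 'jumps', 'easily']"))  # -> ['run', 'jump', 'easili']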
def _queue_number_priority(v): """Returns the task's priority. There's an overflow of 1 bit, as part of the timestamp overflows in the later part of the year, so the result is between 0 and 330. See _gen_queue_number() for the details. """ return int(_queue_number_order_priority(v) >> 22)
e61d6e1d04551ce55a533bfe7805f3358bb8d0ca
3,657,501
def test_generator_aovs(path): """Generate a function testing given `path`. :param path: gproject path to test :return: function """ def test_func(self): """test render pass render layer and AOV particularities """ assert path in g_parsed p = g_parsed[path] aov = grl_util.aov_node(p, 'RenderPass', 'Layer', 'Beauty') self.assertIsInstance(aov, guerilla_parser.GuerillaNode) self.assertEqual(aov.path, "|RenderPass|Layer|Input1") rp_iter = (n for n in p.nodes if n.type == 'RenderPass') for rp in rp_iter: rl_iter = (n for n in rp.children if n.type == 'RenderLayer') for rl in rl_iter: for aov in rl.children: self.assertEqual(aov.type, "LayerOut") aov_2 = grl_util.aov_node(p, rp.name, rl.name, aov.display_name) self.assertIs(aov, aov_2) return test_func
a67b8f741a19f4d3733ab35699ef11a713e283b5
3,657,502
from typing import Union def delimited_list( expr: Union[str, ParserElement], delim: Union[str, ParserElement] = ",", combine: bool = False, min: OptionalType[int] = None, max: OptionalType[int] = None, *, allow_trailing_delim: bool = False, ) -> ParserElement: """Helper to define a delimited list of expressions - the delimiter defaults to ','. By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be overridden by passing ``combine=True`` in the constructor. If ``combine`` is set to ``True``, the matching tokens are returned as a single token string, with the delimiters included; otherwise, the matching tokens are returned as a list of tokens, with the delimiters suppressed. If ``allow_trailing_delim`` is set to True, then the list may end with a delimiter. Example:: delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ if isinstance(expr, str_type): expr = ParserElement._literalStringClass(expr) dlName = "{expr} [{delim} {expr}]...{end}".format( expr=str(expr.copy().streamline()), delim=str(delim), end=" [{}]".format(str(delim)) if allow_trailing_delim else "", ) if not combine: delim = Suppress(delim) if min is not None: if min < 1: raise ValueError("min must be greater than 0") min -= 1 if max is not None: if min is not None and max <= min: raise ValueError("max must be greater than, or equal to min") max -= 1 delimited_list_expr = expr + (delim + expr)[min, max] if allow_trailing_delim: delimited_list_expr += Opt(delim) if combine: return Combine(delimited_list_expr).set_name(dlName) else: return delimited_list_expr.set_name(dlName)
d1ac80f138a21ee21ecf76f918f1c7878863f80c
3,657,503
def get_minion_node_ips(k8s_conf): """ Returns a list of IP addresses of all configured minion hosts :param k8s_conf: the configuration dict :return: a list of IPs """ out = list() node_tuple_3 = get_minion_nodes_ip_name_type(k8s_conf) for hostname, ip, node_type in node_tuple_3: out.append(ip) return out
9a93ddcd025e605805a9693dd14d58c92f53dc42
3,657,504
def calculate_ri(column): """ Function that calculates radiant intensity """ return float(sc.h * sc.c / 1e-9 * np.sum(column))
eac136f520ebbad0ea11f506c742e75fc524c4bb
3,657,505
def find_kw_in_lines(kw, lines, addon_str=' = '): """ Returns the index of the first line in a list of strings that contains the keyword Args: kw: Keyword to find in a line lines: List of strings to search for the keyword addon_str: String to append to your key word to help filter Return: i: Integer index of the line containing the keyword, -1 otherwise """ str_temp = '{}' + addon_str for i, line in enumerate(lines): s = str_temp.format(kw) uncommented = line.strip('#') if s in uncommented: if s[0] == uncommented[0]: break # No match if i == len(lines) - 1: i = -1 return i
4b50c4eaecc55958fca6b134cc748d672c78d014
3,657,506
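A small illustration of find_kw_in_lines above, using made-up lines: the commented-out entry at index 0 is rejected by the first-character check, so the live setting at index 1 is returned.

lines = ["# density = 300", "density = 250", "dt = 0.1"]
idx = find_kw_in_lines("density", lines)  # -> 1
print(lines[idx])                         # density = 250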
def delete_group(current_session, groupname): """ Deletes a group """ projects_to_purge = gp.get_group_projects(current_session, groupname) remove_projects_from_group(current_session, groupname, projects_to_purge) gp.clear_users_in_group(current_session, groupname) gp.clear_projects_in_group(current_session, groupname) gp.delete_group(current_session, groupname) return {"result": "success"}
1a27cec1c3273bb56564587823ad04565867277f
3,657,507
def label_smoothed_nll_loss(lprobs, target, epsilon: float = 1e-8, ignore_index=None): """Adapted from fairseq Parameters ---------- lprobs Log probabilities of amino acids per position target Target amino acids encoded as integer indices epsilon Smoothing factor between 0 and 1, by default 1e-8 ignore_index, optional Amino acid (encoded as integer) to ignore, by default None Returns ------- Negative log-likelihood loss """ nll_loss = -lprobs.gather(dim=-1, index=target) smooth_loss = -lprobs.sum(dim=-1, keepdim=True) if ignore_index is not None: pad_mask = target.eq(ignore_index) nll_loss.masked_fill_(pad_mask, 0.0) smooth_loss.masked_fill_(pad_mask, 0.0) else: nll_loss = nll_loss.squeeze(-1) smooth_loss = smooth_loss.squeeze(-1) nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / lprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss
eb09b7dd5c800b01b723f33cd0f7a84ae93b3489
3,657,508
def ParseFieldDefRequest(post_data, config): """Parse the user's HTML form data to update a field definition.""" field_name = post_data.get('name', '') field_type_str = post_data.get('field_type') # TODO(jrobbins): once a min or max is set, it cannot be completely removed. min_value_str = post_data.get('min_value') try: min_value = int(min_value_str) except (ValueError, TypeError): min_value = None max_value_str = post_data.get('max_value') try: max_value = int(max_value_str) except (ValueError, TypeError): max_value = None regex = post_data.get('regex') needs_member = 'needs_member' in post_data needs_perm = post_data.get('needs_perm', '').strip() grants_perm = post_data.get('grants_perm', '').strip() notify_on_str = post_data.get('notify_on') if notify_on_str in config_svc.NOTIFY_ON_ENUM: notify_on = config_svc.NOTIFY_ON_ENUM.index(notify_on_str) else: notify_on = 0 is_required = 'is_required' in post_data is_multivalued = 'is_multivalued' in post_data field_docstring = post_data.get('docstring', '') choices_text = post_data.get('choices', '') applicable_type = post_data.get('applicable_type', '') applicable_predicate = '' # TODO(jrobbins): placeholder for future feature revised_labels = _ParseChoicesIntoWellKnownLabels( choices_text, field_name, config) return ParsedFieldDef( field_name, field_type_str, min_value, max_value, regex, needs_member, needs_perm, grants_perm, notify_on, is_required, is_multivalued, field_docstring, choices_text, applicable_type, applicable_predicate, revised_labels)
73030f1757ebccf0f9d7710d24b11bf82c8b46c8
3,657,509
import os import time async def get_museum_session_key() -> str: """ Retrieve a session key for the MuseumPlus service, generating a new one if necessary. :returns: Session key """ # We might have an active session key stored locally. key_path = get_session_key_file_path() try: session_time = key_path.stat().st_mtime session_key = key_path.read_text() except FileNotFoundError: # Create the parent directories and/or file if they don't exist os.makedirs(key_path.parent, exist_ok=True) session_time = time.time() session_key = await generate_museum_session_key(previous_key=None) # Regenerate a session key if it *could* have expired. # This is done because the alternative is to test the session key for # validity each time a session is created, and this would create # more useless requests than regenerating a session key after the worker # has stayed dormant for a while; a far more unlikely scenario. maybe_expired = time.time() - SESSION_KEY_REGENERATE_TIMEOUT > session_time if maybe_expired: session_key = await generate_museum_session_key( previous_key=session_key ) return session_key
02890e4e67150cc3ce859861f28db1cbe1657837
3,657,510
import re def parse_date(regexen, date_str): """ Parse a messy string into a granular date `regexen` is of the form [ (regex, (granularity, groups -> datetime)) ] """ if date_str: for reg, (gran, dater) in regexen: m = re.match(reg, date_str) if m: try: return gran, dater(m.groups()) except ValueError: return 0, None return 0, None
a141cad6762556115699ca0327b801537bab1c7e
3,657,511
def PreNotebook(*args, **kwargs): """PreNotebook() -> Notebook""" val = _controls_.new_PreNotebook(*args, **kwargs) return val
1974d3ed08a6811a871f7e069c4b74b97cb32e35
3,657,512
def user_voted(message_id: int, user_id: int) -> bool: """ CHECK IF A USER VOTED ON A DETECTION REPORT """ return bool( c.execute( """ SELECT * FROM reports WHERE message_id=? AND user_id=? """, (message_id, user_id), ).fetchone() )
baddfb69470699d611c050b6732d553f4f415212
3,657,513
import io def get_values(wsdl_url, site_code, variable_code, start=None, end=None, suds_cache=("default",), timeout=None, user_cache=False): """ Retrieves site values from a WaterOneFlow service using a GetValues request. Parameters ---------- wsdl_url : str URL of a service's web service definition language (WSDL) description. All WaterOneFlow services publish a WSDL description and this url is the entry point to the service. site_code : str Site code of the site you'd like to get values for. Site codes MUST contain the network and be of the form <network>:<site_code>, as is required by WaterOneFlow. variable_code : str Variable code of the variable you'd like to get values for. Variable codes MUST contain the network and be of the form <vocabulary>:<variable_code>, as is required by WaterOneFlow. start : ``None`` or datetime (see :ref:`dates-and-times`) Start of the query datetime range. If omitted, data from the start of the time series to the ``end`` timestamp will be returned (but see caveat, in note below). end : ``None`` or datetime (see :ref:`dates-and-times`) End of the query datetime range. If omitted, data from the ``start`` timestamp to end of the time series will be returned (but see caveat, in note below). suds_cache : ``None`` or tuple SOAP local cache duration for WSDL description and client object. Pass a cache duration tuple like ('days', 3) to set a custom duration. Duration may be in months, weeks, days, hours, or seconds. If unspecified, the default duration (1 day) will be used. Use ``None`` to turn off caching. timeout : int or float suds SOAP URL open timeout (seconds). If unspecified, the suds default (90 seconds) will be used. user_cache : bool If False (default), use the system temp location to store cache WSDL and other files. Use the default user ulmo directory if True. Returns ------- site_values : dict a python dict containing values Notes ----- If both ``start`` and ``end`` parameters are omitted, the entire time series available will typically be returned. However, some service providers will return an error if either start or end are omitted; this is especially true for services hosted or redirected by CUAHSI via the CUAHSI HydroPortal, which have a 'WSDL' url using the domain http://hydroportal.cuahsi.org. For HydroPortal, a start datetime of '1753-01-01' has been known to return valid results while catching the oldest start times, though the response may be broken up into chunks ('paged'). """ suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache) # Note from Emilio: # Not clear if WOF servers really do handle time zones (time offsets or # "Z") in the iso8601 datetime strings. In the past, I (Emilio) have # passed naive strings to GetValues(). If a datetime object is passed to # this ulmo function, the isodate code above will include it in the # resulting iso8601 string; if not, no. # Test effect of dt_isostr having # a timezone code or offset, vs not having it (the latter, naive dt # strings, is what I've been using all along). # The interpretation of start and end time zone is server-dependent. start_dt_isostr = None end_dt_isostr = None if start is not None: start_datetime = util.convert_datetime(start) start_dt_isostr = isodate.datetime_isoformat(start_datetime) if end is not None: end_datetime = util.convert_datetime(end) end_dt_isostr = isodate.datetime_isoformat(end_datetime) waterml_version = _waterml_version(suds_client) response = suds_client.service.GetValues( site_code, variable_code, startDate=start_dt_isostr, endDate=end_dt_isostr) response_buffer = io.BytesIO(util.to_bytes(response)) if waterml_version == '1.0': values = waterml.v1_0.parse_site_values(response_buffer) elif waterml_version == '1.1': values = waterml.v1_1.parse_site_values(response_buffer) if variable_code is not None: return list(values.values())[0] else: return values
57b9cbfbf713f5ac858a8d7a36464aae2a657757
3,657,514
def GetDot1xInterfaces(): """Retrieves attributes of all dot1x compatible interfaces. Returns: Array of dict or empty array """ interfaces = [] for interface in GetNetworkInterfaces(): if interface['type'] == 'IEEE80211' or interface['type'] == 'Ethernet': if (interface['builtin'] and 'AppleThunderboltIPPort' not in interface['bus']): interfaces.append(interface) return interfaces
829cc1badf5917cc6302847311e5c8ef6aeebc11
3,657,515
def get_v_l(mol, at_name, r_ea): """ Returns the list of l's and an nconf x nl array of v_l values for each l: l = 0,1,2,...,-1 """ vl = generate_ecp_functors(mol._ecp[at_name][1]) v_l = np.zeros([r_ea.shape[0], len(vl)]) for l, func in vl.items(): # -1,0,1,... v_l[:, l] = func(r_ea) return vl.keys(), v_l
d987e5ceb28169d73ec23aaac2f7ab30a5e881c7
3,657,516
def search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=1, n_max=1000, dn_min=1, dn_max=10, z=0.0, screening=False, extendsearch=None): """ --------------------------------------------------------------------------- Search for electronic transitions of recombination lines at a specified redshift that lie within the specified frequency range Inputs: freq_min [scalar] Minimum in the frequency range (Hz) freq_max [scalar] Maximum in the frequency range (Hz) atomic_number [integer] Atomic number of the atom. It is equal to the number of protons in the nucleus. Must be positive and greater than or equal to unity. atomic_mass [integer] Atomic mass of the atom. It is equal to the sum of the number of protons and neutrons in the nucleus. Must be positive and greater than or equal to unity. n_min [scalar] Minimum in the range of principal quantum numbers of lower electron orbit to search for transitions. Must be positive and greater than or equal to unity. n_max [scalar] Maximum in the range of principal quantum numbers of lower electron orbit to search for transitions. Must be positive and greater than or equal to unity. dn_min [scalar] Minimum in the range of difference in principal quantum numbers to search for transitions. Must be positive and greater than or equal to unity. dn_max [scalar] Maximum in the range of difference in principal quantum numbers to search for transitions. Must be positive and greater than or equal to unity. z [scalar or numpy array] The redshift (when positive) or blueshift (when negative) by which the recombination lines are shifted. Default=0 screening [boolean] If set to False (default), assume the effective charge is equal to the number of protons. If set to True, assume the charges from the nucleus are screened and the effective nuclear charge is equal to unity. extendsearch [None or dictionary] Specifies if the search should be extended beyond the ranges for n and dn by calling this function recursively. If set to None (default), the search will not be extended. Otherwise, search will extend along n and/or dn if in-range frequencies are found at the specified boundaries of n and dn. This parameter must be specified as a dictionary with the following keys and values: 'n' [None or list] If set to None, do not extend search for more values of n. Otherwise it must be a list containing one or both of the strings 'up' and 'down'. If 'up' is present, extend search for higher values of n from the previous iteration. If 'down' is present in the list, extend search for values of n lower than specified in the range in previous iteration. 'dn' [None or list] If set to None, do not extend search for more values of dn. Otherwise it must be a list containing one or both of the strings 'up' and 'down'. If 'up' is present, extend search for higher values of dn from the previous iteration. If 'down' is present in the list, extend search for values of dn lower than specified in the range in previous iteration. Output: Tuple of (n, dn, freq) where each of the elements in the tuple is an array such that the transitions of combinations of n and dn produce recombination lines for a given redshift in the specified frequency range. freq will be returned as an instance of class astropy.units.Quantity --------------------------------------------------------------------------- """ try: freq_min, freq_max, atomic_number, atomic_mass except NameError: raise NameError('Inputs freq_min, freq_max, atomic_number, atomic_mass must be specified') if not isinstance(n_min, int): raise TypeError('Input n_min must be an integer') if n_min < 1: raise ValueError('Input n_min must be greater than or equal to 1') if not isinstance(n_max, int): raise TypeError('Input n_max must be an integer') if n_max < n_min: raise ValueError('Input n_max must be greater than n_min') if not isinstance(dn_min, int): raise TypeError('Input dn_min must be an integer') if dn_min < 1: raise ValueError('Input dn_min must be greater than or equal to 1') if not isinstance(dn_max, int): raise TypeError('Input dn_max must be an integer') if dn_max < dn_min: raise ValueError('Input dn_max must be greater than dn_min') if not isinstance(z, (int,float)): if isinstance(z, NP.ndarray): if z.size != 1: raise TypeError('Input z must be a scalar') else: raise TypeError('Input z must be a scalar') if not isinstance(freq_min, (int,float,units.Quantity)): raise TypeError('Input freq_min must be a scalar') if not isinstance(freq_min, units.Quantity): freq_min = freq_min * units.Hertz if freq_min <= 0.0 * units.Hertz: raise ValueError('Input freq_min must be positive') if not isinstance(freq_max, (int,float,units.Quantity)): raise TypeError('Input freq_max must be a scalar') if not isinstance(freq_max, units.Quantity): freq_max = freq_max * units.Hertz if freq_max <= freq_min: raise ValueError('Input freq_max must be greater than freq_min') if extendsearch is not None: if not isinstance(extendsearch, dict): raise TypeError('Input extendsearch must be a dictionary') for key in extendsearch: if extendsearch[key] is not None: if not isinstance(extendsearch[key], list): raise TypeError('Value under key {0} of input dictionary extendsearch must be a list'.format(key)) nvect = NP.arange(n_min, n_max+1) dnvect = NP.arange(dn_min, dn_max+1) ngrid, dngrid = NP.meshgrid(nvect, dnvect, indexing='ij') nu = redshifted_freq_recomb(atomic_number, atomic_mass, ngrid.reshape(-1), dngrid.reshape(-1), z=z, screening=screening) nu = nu.reshape(nvect.size, dnvect.size, -1) ind_select = NP.where(NP.logical_and(nu >= freq_min, nu <= freq_max)) nu_select = nu[ind_select] n_select = ngrid[:,:,NP.newaxis][ind_select] dn_select = dngrid[:,:,NP.newaxis][ind_select] nu_in_range = None n_in_range = None dn_in_range = None if nu_select.size > 0: if nu_in_range is not None: nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, nu_select.value)), nu_select.unit) n_in_range = NP.concatenate((n_in_range, n_select)) dn_in_range = NP.concatenate((dn_in_range, dn_select)) else: nu_in_range = nu_select.copy() n_in_range = NP.copy(n_select) dn_in_range = NP.copy(dn_select) if extendsearch is not None: new_extendsearch = None for key in extendsearch: if extendsearch[key] is not None: if key == 'n': if n_select.max() == n_max: if 'up' in extendsearch[key]: new_n_min = n_max + 1 new_n_max = 2 * n_max + 1 - n_min if new_extendsearch is None: new_extendsearch = {key: ['up']} elif key not in new_extendsearch: new_extendsearch[key] = ['up'] else: new_extendsearch[key] += ['up'] new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=new_n_min, n_max=new_n_max, dn_min=dn_min, dn_max=dn_max, z=z, screening=screening, extendsearch=new_extendsearch) if new_nu_select.size > 0: if nu_in_range is not None: nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit) n_in_range = NP.concatenate((n_in_range, new_n_select)) dn_in_range = NP.concatenate((dn_in_range, new_dn_select)) else: nu_in_range = new_nu_select.copy() n_in_range = NP.copy(new_n_select) dn_in_range = NP.copy(new_dn_select) if n_select.min() == n_min: if 'down' in extendsearch[key]: if n_min > 1: new_n_min = max([1, 2*n_min - n_max - 1]) new_n_max = n_max - 1 if new_extendsearch is None: new_extendsearch = {key: ['down']} elif key not in new_extendsearch: new_extendsearch[key] = ['down'] else: new_extendsearch[key] += ['down'] new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=new_n_min, n_max=new_n_max, dn_min=dn_min, dn_max=dn_max, z=z, screening=screening, extendsearch=new_extendsearch) if new_nu_select.size > 0: if nu_in_range is not None: nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit) n_in_range = NP.concatenate((new_n_select, n_in_range)) dn_in_range = NP.concatenate((new_dn_select, dn_in_range)) else: nu_in_range = new_nu_select.copy() n_in_range = NP.copy(new_n_select) dn_in_range = NP.copy(new_dn_select) if key == 'dn': if dn_select.max() == dn_max: if 'up' in extendsearch[key]: new_dn_min = dn_max + 1 new_dn_max = 2 * dn_max + 1 - dn_min if new_extendsearch is None: new_extendsearch = {key: ['up']} elif key not in new_extendsearch: new_extendsearch[key] = ['up'] else: new_extendsearch[key] += ['up'] new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=n_min, n_max=n_max, dn_min=new_dn_min, dn_max=new_dn_max, z=z, screening=screening, extendsearch=new_extendsearch) if new_nu_select.size > 0: if nu_in_range is not None: nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit) n_in_range = NP.concatenate((n_in_range, new_n_select)) dn_in_range = NP.concatenate((dn_in_range, new_dn_select)) else: nu_in_range = new_nu_select.copy() n_in_range = NP.copy(new_n_select) dn_in_range = NP.copy(new_dn_select) if dn_select.min() == dn_min: if 'down' in extendsearch[key]: if dn_min > 1: new_dn_min = max([1, 2*dn_min - dn_max - 1]) new_dn_max = dn_max - 1 if new_extendsearch is None: new_extendsearch = {key: ['down']} elif key not in new_extendsearch: new_extendsearch[key] = ['down'] else: new_extendsearch[key] += ['down'] new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=n_min, n_max=n_max, dn_min=new_dn_min, dn_max=new_dn_max, z=z, screening=screening, extendsearch=new_extendsearch) if new_nu_select.size > 0: if nu_in_range is not None: nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit) n_in_range = NP.concatenate((new_n_select, n_in_range)) dn_in_range = NP.concatenate((new_dn_select, dn_in_range)) else: nu_in_range = new_nu_select.copy() n_in_range = NP.copy(new_n_select) dn_in_range = NP.copy(new_dn_select) return (n_in_range, dn_in_range, nu_in_range)
bd5fc3873909ce3937b6e94db9f04edb94dab326
3,657,517
async def test_async__rollback(): """Should rollback basic async actions""" state = {"counter": 0} async def incr(): state["counter"] += 1 return state["counter"] async def decr(): state["counter"] -= 1 async def fail(): raise ValueError("oops") try: with Saga() as saga: counter = await saga.action(incr, decr) assert counter == 1 counter = await saga.action(incr, decr) assert counter == 2 await saga.action(fail, noop) except SagaFailed as e: assert state["counter"] == 0 assert e.transaction.name == "3" assert e.__cause__.args == ("oops",)
54cc780b01190bfd2ea2aacc70e62e8f0b3dfa64
3,657,518
import time import json def _solve_checkpoint_challenge(_bot): """Solve the annoying checkpoint_challenge""" # --- Start challenge time.sleep(3) challenge_url = _bot.last_json['challenge']['api_path'][1:] try: _bot.send_request( challenge_url, None, login=True, with_signature=False) except Exception as e: _bot.logger.error(e) return False # --- Choose and send back the choice # TODO: Sometimes ask to confirm phone or email. # TODO: TESTS NEEDED time.sleep(3) choices = _get_challenge_choices(_bot.last_json) for choice in choices: print(choice) code = input('Insert choice:\n') data = json.dumps({'choice': code}) try: _bot.send_request(challenge_url, data, login=True) except Exception as e: _bot.logger.error(e) return False # Print output for testing _print_bot_last_state(_bot) # --- Wait for the code, insert the code time.sleep(3) print("A code has been sent to the method selected, please check.") code = input('Insert code:\n') data = json.dumps({'security_code': code}) try: _bot.send_request(challenge_url, data, login=True) except Exception as e: _bot.logger.error(e) return False # Print output for testing _print_bot_last_state(_bot) # --- If user logged in, save cookie, otherwise PASS worked = ( ('logged_in_user' in _bot.last_json) and (_bot.last_json.get('action', '') == 'close') and (_bot.last_json.get('status', '') == 'ok')) if worked: # IMPORTANT, save the cookie at this step! _bot.save_cookie(COOKIE_FNAME) return True else: _bot.logger.error('Not possible to log in. Reset and try again') return False
5114ac1c49eecf174a994f4b487e1d8a30d4f907
3,657,519
import requests def is_referenced(url, id, catalog_info): """Given the url of a resource from the catalog, this function returns True if the resource is referenced by data.gouv.fr False otherwise :param :url: url of a resource in the catalog :type :url: string""" dgf_page = catalog_info['url_dgf'] headers = requests.head(url).headers downloadable = 'attachment' in headers.get('Content-Disposition', '') if not downloadable: raise Exception(f'This id is associated to a dataset not referenced by data.gouv.fr. \n ' f'Please download the dataset from here: {dgf_page}\n' f'Then manually upload it in the corresponding folder and name it: {id}.csv') return downloadable
15cfa64979f2765d29d7c4bb60a7a017feb27d43
3,657,520
import glob import functools def create_sema3d_datasets(args, test_seed_offset=0): """ Gets training and test datasets. """ train_names = ['bildstein_station1', 'bildstein_station5', 'domfountain_station1', 'domfountain_station3', 'neugasse_station1', 'sg27_station1', 'sg27_station2', 'sg27_station5', 'sg27_station9', 'sg28_station4', 'untermaederbrunnen_station1'] valid_names = ['bildstein_station3', 'domfountain_station2', 'sg27_station4', 'untermaederbrunnen_station3'] #train_names = ['bildstein_station1', 'domfountain_station1', 'untermaederbrunnen_station1'] #valid_names = ['domfountain_station2', 'untermaederbrunnen_station3'] path = '{}/features_supervision/'.format(args.ROOT_PATH) if args.db_train_name == 'train': trainlist = [path + 'train/' + f + '.h5' for f in train_names] elif args.db_train_name == 'trainval': trainlist = [path + 'train/' + f + '.h5' for f in train_names + valid_names] testlist = [] if 'train' in args.db_test_name: testlist += [path + 'train/' + f + '.h5' for f in train_names] if 'val' in args.db_test_name: testlist += [path + 'train/' + f + '.h5' for f in valid_names] if 'testred' in args.db_test_name: testlist += [f for f in glob.glob(path + 'test_reduced/*.h5')] if 'testfull' in args.db_test_name: testlist += [f for f in glob.glob(path + 'test_full/*.h5')] return tnt.dataset.ListDataset(trainlist, functools.partial(graph_loader, train=True, args=args, db_path=args.ROOT_PATH)), \ tnt.dataset.ListDataset(testlist, functools.partial(graph_loader, train=False, args=args, db_path=args.ROOT_PATH, full_cpu = True))
8642c5a10a5256fb9541be86676073c993b2faf8
3,657,521
def adjust_learning_rate(optimizer, step, args): """ Sets the learning rate to the initial LR decayed by gamma at every specified step/epoch Adapted from PyTorch Imagenet example: https://github.com/pytorch/examples/blob/master/imagenet/main.py step could also be epoch """ schedule_list = np.array(args.schedule) decay = args.gamma ** (sum(step >= schedule_list)) lr = args.lr * decay for param_group in optimizer.param_groups: param_group['lr'] = lr return lr
359e2c5e0deb1abd156b7a954ecfae1b23511db2
3,657,522
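A toy check of the step-decay schedule in adjust_learning_rate above, with made-up values (schedule=[30, 60], gamma=0.1, base lr=0.1); it only reproduces the decay arithmetic, not the optimizer update.

import numpy as np

schedule_list = np.array([30, 60])
for step in (10, 30, 60):
    lr = 0.1 * 0.1 ** (np.sum(step >= schedule_list))
    print(step, lr)  # 10 -> 0.1, 30 -> ~0.01, 60 -> ~0.001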
def sigmoid(z): """Sigmoid function """ return 1.0/(1.0+np.exp(-z))
80187d3711d18602a33d38edcc48eaad5c51818f
3,657,523
def beamformerFreq(steerVecType, boolRemovedDiagOfCSM, normFactor, inputTupleSteer, inputTupleCsm): """ Conventional beamformer in frequency domain. Use either a predefined steering vector formulation (see Sarradj 2012) or pass your own steering vector. Parameters ---------- steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV), 'custom') Either build the steering vector via the predefined formulations I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it directly. boolRemovedDiagOfCSM : bool Should the diagonal of the csm be removed? normFactor : float In here both the signal energy loss factor (due to removal of the csm diagonal) as well as beamforming algorithm (music, capon, ...) dependent normalization factors are handled. inputTupleSteer : contains the information needed to create the steering vector. Is dependent of steerVecType. There are 2 cases: steerVecType != 'custom' : inputTupleSteer = (distGridToArrayCenter, distGridToAllMics, waveNumber) , with distGridToArrayCenter : float64[nGridpoints] Distance of all gridpoints to the center of sensor array distGridToAllMics : float64[nGridpoints, nMics] Distance of all gridpoints to all sensors of array waveNumber : float64 The wave number steerVecType == 'custom' : inputTupleSteer = steeringVector , with steeringVector : complex128[nGridPoints, nMics] The steering vector of each gridpoint for the same frequency as the CSM inputTupleCsm : contains the data of measurement as a tuple. There are 2 cases: perform standard CSM-beamformer: inputTupleCsm = csm csm : complex128[ nMics, nMics] The cross spectral matrix for one frequency perform beamformer on eigenvalue decomposition of csm: inputTupleCsm = (eigValues, eigVectors) , with eigValues : float64[nEV] nEV is the number of eigenvalues which should be taken into account. All passed eigenvalues will be evaluated. eigVectors : complex128[nMics, nEV] Eigen vectors corresponding to eigValues. All passed eigenvector slices will be evaluated. Returns ------- *Autopower spectrum beamforming map [nGridPoints] *steer normalization factor [nGridPoints]... contains the values the autopower needs to be multiplied with, in order to fulfill 'steer^H * steer = 1' as needed for functional beamforming. Some Notes on the optimization of all subroutines ------------------------------------------------- Reducing beamforming equation: Let the csm be C and the steering vector be h, then, using Linear Algebra, the conventional beamformer can be written as .. math:: B = h^H \\cdot C \\cdot h, with ^H meaning the complex conjugated transpose. When using that C is a hermitian matrix one can reduce the equation to .. math:: B = h^H \\cdot C_D \\cdot h + 2 \\cdot Real(h^H \\cdot C_U \\cdot h), where C_D and C_U are the diagonal part and upper part of C respectively. Steering vector: Theoretically the steering vector always includes the term "exp(distMicsGrid - distArrayCenterGrid)", but as the steering vector gets multiplied with its complex conjugation in all beamformer routines, the constant "distArrayCenterGrid" cancels out --> In order to save operations, it is not implemented. Spectral decomposition of the CSM: In Linear Algebra the spectral decomposition of the CSM matrix would be: .. math:: CSM = \\sum_{i=1}^{nEigenvalues} \\lambda_i (v_i \\cdot v_i^H) , where lambda_i is the i-th eigenvalue and v_i is the eigenvector[nEigVal,1] belonging to lambda_i and ^H denotes the complex conjugate transpose. Using this, one must not build the whole CSM (which would be time consuming), but can drag the steering vector into the sum of the spectral decomp. This saves a lot of operations. Squares: Seemingly "a * a" is slightly faster than "a**2" in numba Square of abs(): Even though "a.real**2 + a.imag**2" would have fewer operations, modern processors seem to be optimized for "a * a.conj" and are slightly faster the latter way. Both versions are much faster than "abs(a)**2". Using Cascading Sums: When using the Spectral-Decomposition-Beamformer one could use numpys cascading sums for the scalar product "eigenVec.conj * steeringVector". BUT (at the moment) this only brings benefits in comp-time for a very small range of nMics (approx 250) --> Therefore it is not implemented here. """ boolIsEigValProb = isinstance(inputTupleCsm, tuple)# len(inputTupleCsm) > 1 # get the beamformer type (key-tuple = (isEigValProblem, formulationOfSteeringVector, RemovalOfCSMDiag)) beamformerDict = {(False, 'classic', False) : _freqBeamformer_Formulation1AkaClassic_FullCSM, (False, 'classic', True) : _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag, (False, 'inverse', False) : _freqBeamformer_Formulation2AkaInverse_FullCSM, (False, 'inverse', True) : _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag, (False, 'true level', False) : _freqBeamformer_Formulation3AkaTrueLevel_FullCSM, (False, 'true level', True) : _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag, (False, 'true location', False) : _freqBeamformer_Formulation4AkaTrueLocation_FullCSM, (False, 'true location', True) : _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag, (False, 'custom', False) : _freqBeamformer_SpecificSteerVec_FullCSM, (False, 'custom', True) : _freqBeamformer_SpecificSteerVec_CsmRemovedDiag, (True, 'classic', False) : _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM, (True, 'classic', True) : _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag, (True, 'inverse', False) : _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM, (True, 'inverse', True) : _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag, (True, 'true level', False) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM, (True, 'true level', True) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag, (True, 'true location', False) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM, (True, 'true location', True) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag, (True, 'custom', False) : _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM, (True, 'custom', True) : _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag} coreFunc = beamformerDict[(boolIsEigValProb, steerVecType, boolRemovedDiagOfCSM)] # prepare Input if steerVecType == 'custom': # beamformer with custom steering vector steerVec = inputTupleSteer #nFreqs, nGridPoints = steerVec.shape[0], steerVec.shape[1] nGridPoints = steerVec.shape[0] else: # predefined beamformers (Formulation I - IV) distGridToArrayCenter, distGridToAllMics, waveNumber = inputTupleSteer#[0], inputTupleSteer[1], inputTupleSteer[2] if not isinstance(waveNumber, np.ndarray): waveNumber = np.array([waveNumber]) #nFreqs, nGridPoints = waveNumber.shape[0], distGridToAllMics.shape[0] nGridPoints = distGridToAllMics.shape[0] if boolIsEigValProb: eigVal, eigVec = inputTupleCsm#[0], inputTupleCsm[1] else: csm = inputTupleCsm # beamformer routine: parallelized over Gridpoints beamformOutput = np.zeros(nGridPoints, np.float64) steerNormalizeOutput = np.zeros_like(beamformOutput) result = np.zeros(nGridPoints, np.float64) normalHelp = np.zeros_like(result) if steerVecType == 'custom': # beamformer with custom steering vector if boolIsEigValProb: coreFunc(eigVal, eigVec, steerVec, normFactor, result, normalHelp) else: coreFunc(csm, steerVec, normFactor, result, normalHelp) else: # predefined beamformers (Formulation I - IV) if boolIsEigValProb: coreFunc(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp) else: coreFunc(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp) beamformOutput = result steerNormalizeOutput = normalHelp return beamformOutput, steerNormalizeOutput
f747122b0dff9a7b966813062b93a1cab8a91f3f
3,657,524
from typing import IO def createNewPY(): """Convert normal pinyin to TTS pinyin""" py_trans = {} input_pinyin_list = IO.readList(r'docs/transTTSPinyin.txt') for line in input_pinyin_list: line_array = line.split(',') py_trans[line_array[0]] = line_array[1] return py_trans
e2bd5007cc217f72e3ffbeafd0ff75e18f8ec213
3,657,525
import re def search_wheelmap (lat, lng, interval, name, n): """Searches for a place which matches the given name in the given coordinates range. Returns false if nothing found""" # Calculate the bbox for the API call from_lat = lat - interval to_lat = lat + interval from_lng = lng - interval to_lng = lng + interval # Remove parentheses (better for search, generally) name = re.sub(r'\([^)]*\)', '', name) wheelmap_client = wheelmap.Wheelmap(env['WHEELMAP_API_KEY']) bbox= (from_lng, from_lat, to_lng, to_lat) nodes = wheelmap_client.nodes_collection(bbox=bbox, per_page=n) # max_node and max_name_match are holding the # best match through the SequenceMatcher after the loop max_name_match = 0.0 for node in nodes: if node.name and name: name_match = SequenceMatcher(None, node.name, name).ratio() if name_match > max_name_match: max_node = node max_name_match = name_match # Is the best match better than 60% ? # If yes, let's take it. Otherwise nothing was found. if max_name_match > 0.6: return max_node else: return False
88dfbf973fbd4891a4d8bf955335177ca3654016
3,657,526
from typing import Dict def get_entity_contents(entity: Dict) -> Dict: """ :param entity: Entity is a dictionary :return: A dict representation of the contents of entity """ return { 'ID': entity.get('id'), 'Name': entity.get('name'), 'EmailAddress': entity.get('email_address'), 'Organization': entity.get('organization'), 'Tags': entity.get('labels'), 'StrictNameMatching': entity.get('strict_name_matching'), 'PolicyID': entity.get('policy_id'), 'Profile': entity.get('profile'), 'EntityGroupID': entity.get('entity_group', {}).get('id') if entity.get('entity_group') else None, 'EntityGroupName': entity.get('entity_group', {}).get('name') if entity.get('entity_group') else None, 'TypeID': entity.get('type', {}).get('id') if entity.get('type') else None, 'TypeName': entity.get('type', {}).get('name') if entity.get('type') else None }
3c9e133bf80bc4d59c6f663503b5083401acc4e0
3,657,527
def t68tot90(t68): """Convert from IPTS-68 to ITS-90 temperature scales, as specified in the CF Standard Name information for sea_water_temperature http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html temperatures are in degrees C""" t90 = 0.99976 * t68 return t90
87ff55a196f01b8f1afd78381e7d012eafa079fa
3,657,528
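A quick numeric check of the t68tot90 conversion above: an IPTS-68 temperature of 10.0 degrees C maps to 0.99976 * 10.0 = 9.9976 degrees C on ITS-90, so the two scales differ by only a few millikelvin at typical sea-water temperatures.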
def get_sort_accuracy_together(fake_ys, y): """ Args: fake_ys (np.ndarray): with shape (n_results, n_sample,). y (np.ndarray): with shape (n_sample,). Returns: score (np.ndarray): with shape (n_results,) """ y_sort = np.sort(y) y_sort2 = np.sort(y)[::-1] fake_ys = np.nan_to_num(fake_ys, nan=np.nan, posinf=np.nan, neginf=np.nan) mark = np.any(np.isnan(fake_ys), axis=1) fake_ys = np.nan_to_num(fake_ys, nan=-1, posinf=-1, neginf=-1) index = np.argsort(fake_ys, axis=1) y_pre_sort = y[index] acc1 = 1 - np.mean(np.abs(y_pre_sort - y_sort), axis=1) acc2 = 1 - np.mean(np.abs(y_pre_sort - y_sort2), axis=1) score = np.max(np.concatenate((acc1.reshape(1, -1), acc2.reshape(1, -1)), axis=0), axis=0) score[mark] = 0.0 return score
4ba4810057bb936fdb5a94669796b0a260eeee49
3,657,529
def random_account_number(): """ Generate random encoded account number for testing """ _, account_number = create_account() return encode_verify_key(verify_key=account_number)
d662dc0acdc78f86baf2de998ab6ab920cc80ca0
3,657,530
def get_recommendation_summary_of_projects(project_ids, state, credentials): """Returns the summary of recommendations on all the given projects. Args: project_ids: List(str) projects for which recommendations are needed. state: state of recommendations credentials: client credentials. """ recommender = build("recommender", "v1", credentials=credentials, cache_discovery=False) def get_metric(project_id): recommendation_metric = common.get_recommendations( project_id, recommender=recommender, state=state, credentials=credentials) return accounts_can_made_safe(project_id, state, recommendation_metric) recommendation_stats = common.rate_limit_execution(get_metric, RATE_LIMIT, project_ids) recommendation_stats_sorted = sorted( recommendation_stats, key=lambda metric: -sum(metric["stats"].values())) return recommendation_stats_sorted
68cd42e4465bbdc85d88b82cb345b64a4ec1fec8
3,657,531
def selection_filter(file_path): """ Obtain the feature subset selected by a filter method: f_classif, chi2, mutual_info_classif """ df = pd.read_csv(file_path) delete_list = ['id'] df.drop(delete_list, axis=1, inplace=True) feature_attr = [i for i in df.columns if i not in ['label']] df.fillna(0, inplace=True) # feature preprocessing obj_attrs = [] for attr in feature_attr: if df.dtypes[attr] == np.dtype(object): # collect categorical (object-dtype) columns obj_attrs.append(attr) if len(obj_attrs) > 0: df = pd.get_dummies(df, columns=obj_attrs) # convert to dummy variables y = df.label X = df.drop('label', axis=1) model = SelectKBest(f_classif, k=108) X_new = model.fit_transform(X, y) df_X_new = pd.DataFrame(X_new) list = [] for i in X.columns: for j in df_X_new.columns: if np.sum(np.abs(X[i].values - df_X_new[j].values)) == 0: list.append(i) break useful_list = sorted(set(X.columns.to_list()) - set(list), key = X.columns.to_list().index) print(useful_list) list.append('label') return list
d6f6848c499f2d4899828e1e1bd0fb0ffe930186
3,657,532
def _process_voucher_data_for_order(cart): """Fetch, process and return voucher/discount data from cart.""" vouchers = Voucher.objects.active(date=date.today()).select_for_update() voucher = get_voucher_for_cart(cart, vouchers) if cart.voucher_code and not voucher: msg = pgettext( 'Voucher not applicable', 'Voucher expired in meantime. Task placement aborted.') raise NotApplicable(msg) if not voucher: return {} increase_voucher_usage(voucher) return { 'voucher': voucher, 'discount_amount': cart.discount_amount, 'discount_name': cart.discount_name, 'translated_discount_name': cart.translated_discount_name}
ec15f13607cee7e4bdd2e16f9a44904638964d36
3,657,533
def is_insertion(ref, alt): """Is alt an insertion w.r.t. ref? Args: ref: A string of the reference allele. alt: A string of the alternative allele. Returns: True if alt is an insertion w.r.t. ref. """ return len(ref) < len(alt)
17d7d6b8dfdf387e6dd491a6f782e8c9bde22aff
3,657,534
from typing import Optional def identify_fast_board(switches: int, drivers: int) -> Optional[FastIOBoard]: """Instantiate and return a FAST board capable of accommodating the given number of switches and drivers.""" if switches > 32 or drivers > 16: return None if switches > 16: return None if drivers > 8 else FastIO3208() if drivers <= 4: return FastIO0804() if switches <= 8: return FastIO1616() return None
27c0dca3e0421c9b74976a947eda5d6437598c01
3,657,535
import struct def encode_hop_data( short_channel_id: bytes, amt_to_forward: int, outgoing_cltv_value: int ) -> bytes: """Encode a legacy 'hop_data' payload to bytes https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md#legacy-hop_data-payload-format :param short_channel_id: the short channel id this hop relates to :param amt_to_forward: the amount to forward on this hop :param outgoing_cltv_value: the outgoing cltv value to use for this hop :return: the hop_data payload """ # Bolt #7: The hop_data format is identified by a single 0x00-byte length, for # backward compatibility. hop_data = struct.pack(config.be_u8, 0x00) hop_data += short_channel_id hop_data += struct.pack(config.be_u64, amt_to_forward) hop_data += struct.pack(config.be_u32, outgoing_cltv_value) # [12*byte:padding] hop_data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" return hop_data
51fda780036fdcbb8ff1d5cd77b422aaf92eb4fd
3,657,536
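A rough sketch of the byte layout that encode_hop_data above produces, assuming config.be_u8, config.be_u64 and config.be_u32 are the big-endian struct format strings ">B", ">Q" and ">I" (an assumption about the config module, not confirmed by the snippet).

import struct

short_channel_id = bytes(8)            # placeholder 8-byte short_channel_id
payload = struct.pack(">B", 0x00)      # leading 0x00 realm/length byte
payload += short_channel_id
payload += struct.pack(">Q", 100000)   # amt_to_forward
payload += struct.pack(">I", 500000)   # outgoing_cltv_value
payload += bytes(12)                   # 12 bytes of padding
assert len(payload) == 33              # 1 + 8 + 8 + 4 + 12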
def extract_all_patterns(game_state, action, mask, span): """ Extracting the local forward model pattern for each cell of the grid's game-state and returning a numpy array :param prev_game_state: game-state at time t :param action: players action at time t :param game_state: resulting game-state at time t+1 :param mask: square pattern mask (boolean array to mark which tiles should be included. :param span: The span of the mask. :return: np.ndarray of observed patterns """ data_set = np.zeros((game_state.shape[0]*game_state.shape[1], np.sum(mask)+1)) # only iterate over positions that were affected by the game state's changes positions = [(x, y) for x in range(game_state.shape[0]) for y in range(game_state.shape[1])] ext_game_state_grid = np.pad(game_state, span, "constant", constant_values=1) for i, (x, y) in enumerate(positions): el = ext_game_state_grid[span + x - span: span + x + span + 1, span + y - span: span + y + span + 1][mask].tolist() el.append(action) data_set[i, :] = el return data_set
06e44c871a14b7685ca5dd165285cfe2c7076b85
3,657,537
import os def wrapper_subcavities(final_cavities, cav_of_interest, grid_min, grid_shape, cavities, code, out, sourcedir, list_ligands, seeds_mindist = 3, merge_subcavs = True, minsize_subcavs = 50, min_contacts = 0.667, v = False, printv = False, print_pphores_subcavs = False, export_subcavs = False, gridspace = 1.0, frame = None): """ Wraps transform_cav2im3d, find_subcav_watershed, map_subcav_in_cav merge_small_enclosed_subcavs, print_subcavs_pphores and export_pdb_subcavities as one function """ # Convert to a 3D image for skimage im3d = transform_cav2im3d(final_cavities[cav_of_interest], grid_min, grid_shape) #filtered_pharma[order][cav_of_interest]) # Perform the watershed algorithm, including entropy of pharmacophores labels = find_subcav_watershed(im3d, seeds_mindist) # Map results of watershed to grid points of cavity #subcavs = map_subcav_in_cav(cavities, cav_of_interest, labels, args.code, grid_min, grid_shape) subcavs = map_subcav_in_cav(labels, grid_min) if merge_subcavs == True: subcavs = merge_small_enclosed_subcavs(subcavs, minsize_subcavs = minsize_subcavs, min_contacts = min_contacts, v = v) subcavs_table = print_subcavs_pphores(cavities, subcavs, cav_of_interest, code, grid_min, grid_shape, frame) # Export if export_subcavs: try: os.mkdir(out) except: pass if frame: export_pdb_subcavities(subcavs, code[:-4]+"_"+str(frame), grid_min, grid_shape, cavid = cav_of_interest, gridspace = gridspace, outdir = out, listlig = list_ligands, oridir = sourcedir) else: export_pdb_subcavities(subcavs, code[:-4], grid_min, grid_shape, cavid = cav_of_interest, gridspace = gridspace, outdir = out, listlig = list_ligands, oridir = sourcedir) return subcavs_table
94751b892b473d818f27f431420aa7de726c91d3
3,657,538
import os def generate_datafile(lists_of_systems, output_dir, filename): """ take in a list of lists which contains systems generate one input data file per list """ result = [] for index, list_of_sys in enumerate(lists_of_systems): output_filename = filename + "_" + str(index) + ".xml" output_file = os.path.join(output_dir, output_filename) fd = file_Utils.open_file(output_file, "w+") if fd is not None: root = xml_Utils.create_element("root") for system in list_of_sys: root.append(system) fd.write(xml_Utils.convert_element_to_string(root)) result.append(output_file) return result
70024dc2c1420b9fbff312856b8bb48ee645e772
3,657,539
def cond(*args, **kwargs): """Conditional computation to run on accelerators.""" return backend()['cond'](*args, **kwargs)
969307c62bd4a2eef6b16dffff953910524cc3c1
3,657,540
import os def get_testfile_paths(): """ return the necessary paths for the testfile tests Returns ------- str absolute file path to the test file str absolute folder path to the expected output folder """ testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', '0009_20170523_181119_FA2806.all') expected_output = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', 'converted') return testfile, expected_output
f1cb8d29c70c686fbca43175637f44b7c5342180
3,657,541
def singleton(cls): """Decorator that provides singleton functionality. >>> @singleton ... class Foo(object): ... pass ... >>> a = Foo() >>> b = Foo() >>> a is b True """ _inst = [None] def decorated(*args, **kwargs): if _inst[0] is None: _inst[0] = cls(*args, **kwargs) return _inst[0] return decorated
4ae64aeaaba1b838232e4d7700d692dcc109be6d
3,657,542
import inspect def _with_factory(make_makers): """Return a decorator for test methods or classes. Args: make_makers (callable): Return an iterable over (name, maker) pairs, where maker (callable): Return a fixture (arbitrary object) given Factory as single argument """ def wrap(test_func): def wrapper(self, *args, **kwargs): factory = make_factory( self.addCleanup, test=self, root=None, makers=make_makers()) return test_func(self, factory, *args, **kwargs) return wrapper def deco(test_func_or_class): if inspect.isclass(test_func_or_class): class_ = test_func_or_class for name, method in inspect.getmembers(class_, is_test_method): wrapped_method = wrap(method) setattr(class_, name, wrapped_method) return class_ else: method = test_func_or_class return wrap(method) return deco
5841e80129b212bba2c6d0b1f89966fa0d5ce152
3,657,543
import time def timeItDeco(func): """ Decorator which times the given function. """ def timing(*args, **kwargs): """ This function will replace the original function. """ # Start the clock t1 = time.clock() # Run the original function and collect results result = func(*args, **kwargs) # Print out the execution time print('Execution time', time.clock() - t1) return result # Return the function that was modified return timing
9c59a512a9cf9eac190af4a88dbf8ccab2069f55
3,657,544
def apply_haste(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn: """ Apply the effects of haste to the target: attack beats attack """ # "attack": {"beats": ["disrupt", "area", "attack"], "loses": ["block", "dodge"]} if left: # Remove attack from the attack: loses dict if "attack" in rules["attack"]["loses"]: rules["attack"]["loses"].remove("attack") # Add attack to the attack: beats dict if "attack" not in rules["attack"]["beats"]: rules["attack"]["beats"].append("attack") # "attack": {"beats": ["disrupt", "area"], "loses": ["block", "dodge", "attack"]} else: # Remove attack from the attack: beats dict if "attack" in rules["attack"]["beats"]: rules["attack"]["beats"].remove("attack") # Add attack to the attack: loses dict if "attack" not in rules["attack"]["loses"]: rules["attack"]["loses"].append("attack") return self, target, rules
0186fe8553cb89c73d9a3cfae35048cd465b9859
3,657,545
def get_mean_cube(datasets): """Get mean cube of a list of datasets. Parameters ---------- datasets : list of dict List of datasets (given as metadata :obj:`dict`). Returns ------- iris.cube.Cube Mean cube. """ cubes = iris.cube.CubeList() for dataset in datasets: path = dataset['filename'] cube = iris.load_cube(path) prepare_cube_for_merging(cube, path) cubes.append(cube) mean_cube = cubes.merge_cube() if len(cubes) > 1: mean_cube = mean_cube.collapsed(['cube_label'], iris.analysis.MEAN) mean_cube.remove_coord('cube_label') return mean_cube
492b5df11252beb691c62c58005ce2c3c1dcb3b8
3,657,546
async def gen_unique_chk_sum(phone, message, first_dial): """Generates a checksum in order to identify every single call""" return blake2b( bytes(phone, encoding="utf-8") + bytes(message, encoding="utf-8") + bytes(str(first_dial), encoding="utf-8"), digest_size=4, ).hexdigest()
c85076f4fd1e2814116ece59390bebb9f398a4f6
3,657,547
def getQtipResults(version, installer): """ Get QTIP results """ period = get_config('qtip.period') url_base = get_config('testapi.url') url = ("http://" + url_base + "?project=qtip" + "&installer=" + installer + "&version=" + version + "&period=" + str(period)) request = Request(url) try: response = urlopen(request) k = response.read() response.close() results = json.loads(k)['results'] except URLError as err: print('Got an error code: {}'.format(err)) result_dict = {} if results: for r in results: key = '{}/{}'.format(r['pod_name'], r['scenario']) if key not in result_dict.keys(): result_dict[key] = [] result_dict[key].append(r['details']['score']) # return scenario_results return result_dict
4ae01b33a2eed23a8d3ad7b7dd1d5a3bcc8d5ab8
3,657,548
def scaled_softplus(x, alpha, name=None): """Returns `alpha * ln(1 + exp(x / alpha))`, for scalar `alpha > 0`. This can be seen as a softplus applied to the scaled input, with the output appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends to `relu(x)`. Note: the gradient for this operation is defined to depend on the backprop inputs as well as the outputs of this operation. Args: x: A `Tensor` of inputs. alpha: A scalar `Tensor`, indicating the amount of smoothness. The caller must ensure that `alpha > 0`. name: A name for the scope of the operations (optional). Returns: A tensor of same size and type as `x`. """ with ops.name_scope(name, 'scaled_softplus', [x, alpha]): x = ops.convert_to_tensor(x, name='x') dtype = x.dtype alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha') # Verify that alpha is a scalar. alpha.get_shape().assert_has_rank(0) def _grad(op, g): """Backprop for scaled softplus.""" y = op.outputs[0] alpha = op.inputs[1] # Prevent the expensive computations from happening before g is available. with ops.control_dependencies([g]): y /= alpha emy = math_ops.exp(-y) dy_dx = 1. - emy # The eps below avoids log(0). Note that t*log(t) -> 0 as t->0. eps = 1e-8 dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps) return g * dy_dx, math_ops.reduce_sum(g * dy_dalpha) @function.Defun(dtype, dtype, func_name='ScaledSoftplus_%s' % dtype.name, shape_func=lambda op: [op.inputs[0].get_shape()], python_grad_func=_grad) def _forward(x, alpha): """Forward computation of scaled softplus.""" return alpha * nn.softplus(x / alpha) return _forward(x, alpha)
526c5169b1ac938e3f645e96dc7e65bb4acf64b5
3,657,549
def get_choice(options): """Returns the selected option as an integer for the given input prompt""" print(options) try: return int(input("Por favor, escoja una opción: ")) except ValueError: return 0
32e95e0113650d0b94449e5e31e7d8156ae85981
3,657,550
def _listminus(list1, list2): """ Return the elements of list1 that are not in list2 """ return [a for a in list1 if a not in list2]
3f05d8bfd4169d92bb51c4617536b54779b387c9
3,657,551
def pdf_to_hocr(path, lang="fra+deu+ita+eng", config="--psm 4"): """Loads and transforms a pdf into an hOCR file. Parameters ---------- path : str, required The pdf's path lang: str, optional (default="fra+deu+ita+eng") Supported language(s) for Pytesseract. config: str, optional (default = "--psm 4") Custom configuration flag used by Tesseract """ try: import pytesseract from pdf2image import convert_from_bytes except ImportError: logger.error( "pytesseract and pdf2image have to be installed to use this function\n run `pip install -U pytesseract pdf2image`" ) return with open(path, "rb") as f: images = convert_from_bytes(f.read(), dpi=300) return images_to_hocr(images)
9619d45dc418f07634fd161f1dff50b4cf334e21
3,657,552
import httpx async def fetch_cart_response(cart_id: str) -> httpx.Response: """Fetches cart response.""" headers = await get_headers() async with httpx.AsyncClient(base_url=CART_BASE_URL) as client: response = await client.get( url=f'/{cart_id}', headers=headers, ) try: response.raise_for_status() except httpx.HTTPStatusError: raise MoltinError(response.json()) # type: ignore return response
2d2da772b257b43beda78f3b08c42c914c01f00d
3,657,553
from sys import stdout import logging from typing import Protocol def checkHardware(binary, silent=False, transaction=None): """ Probe caffe for hardware by incrementing device ids until an id is missing. Returned structure: [ { "id": 0, "name": "..", "log": ["..", "..", "..", ... ] }, { "id": 1, "name": "..", "log": ["..", "..", "..", ... ] }, ... ] """ gid = 0 hw = [] if not silent: stdout.write("Checking Hardware...\n") logging.info("Checking Hardware...") cpu = _getCPU() name = _getCPUName(cpu) hw.append({"name": name, "log": cpu}) if not silent: stdout.write("CPU found: " + name + "\n") logging.info("CPU found: %s", name) if transaction: msg = {"key": Protocol.SCANHARDWARE, "finished": False, "name": name} transaction.send(msg) while True: log = _getId(gid, binary) if not _isValid(log) or _isCpuOnly(log): if not silent and gid == 0: stdout.write("No GPU found, CPU mode\n") logging.info("No GPU found, CPU mode") break name = _getName(log) if not silent: stdout.write("GPU " + str(gid) + " found: " + name + "\n") if transaction: msg = {"key": Protocol.SCANHARDWARE, "finished": False, "name": name, "id": gid} transaction.send(msg) hw.append({"id": gid, "name": name, "log": _parseLog(log)}) gid += 1 return hw
be13049d6d790410430de8a507ceefc61f276eec
3,657,554
def is_namespace_mutable(context, namespace): """Return True if the namespace is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return namespace.owner == context.owner
f5303e75b975a1ba51aa39c608ec5af339917446
3,657,555
def get_schularten_by_veranst_iq_id(veranst_iq_id):
    """ Returns the list of school types (Schularten) that match the event with the given veranst_iq_id """
    query = session.query(Veranstaltung).add_entity(Schulart).join('rel_schulart')
    query = query.reset_joinpoint()
    query = query.filter_by(veranst_iq_id=veranst_iq_id)
    return query.all()
4c18b2fe73b17752ee2838815fa9fde8426a7ccb
3,657,556
def get_station_freqs(df, method='median'): """ apply to df after applying group_by_days and group_by_station """ #df['DATE'] = df.index.get_level_values('DATE') df['DAY'] = [d.dayofweek for d in df.index.get_level_values('DATE')] df['DAYNAME'] = [d.day_name() for d in df.index.get_level_values('DATE')] return df.groupby(['STATION', 'DAY','DAYNAME']).agg({'INS':method, 'OUTS':method})
aebc1a2486c48ff2d829fc70f1f2c4b38bd3017b
3,657,557
def faster_symbol_array(genome, symbol):
    """A faster calculation method for counting a symbol in genome.

    Args:
        genome (str): a DNA string as the search space.
        symbol (str): the single base to query in the search space.

    Returns:
        Dictionary, a dictionary of position-count pairs giving the count of
        symbol in each sliding window (of length len(genome)//2) over genome.

    Examples:
        The symbol array for genome equal to "AAAAGGGG" and symbol equal to "A".

        >>> genome = 'AAAAGGGG'
        >>> symbol = 'A'
        >>> position_symbolcount_dict = faster_symbol_array(genome, symbol)
        >>> position_symbolcount_dict
        {0: 4, 1: 3, 2: 2, 3: 1, 4: 0, 5: 1, 6: 2, 7: 3}
    """
    array = {}
    n = len(genome)
    extended_genome = genome + genome[0:n//2]

    # look at the first half of Genome to compute first array value
    array[0] = pattern_count(symbol, genome[0:n//2])

    for i in range(1, n):
        # start by setting the current array value equal to the previous array value
        array[i] = array[i-1]

        # the current array value can differ from the previous array value by at most 1
        if extended_genome[i-1] == symbol:
            array[i] = array[i]-1
        if extended_genome[i+(n//2)-1] == symbol:
            array[i] = array[i]+1

    return array
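The snippet relies on a `pattern_count` helper that is not shown; a brute-force check with a hypothetical stand-in reproduces the doctest values:

def pattern_count(symbol, text):
    # hypothetical stand-in: occurrences of the single base `symbol` in `text`
    return sum(1 for base in text if base == symbol)

genome, symbol = 'AAAAGGGG', 'A'
n = len(genome)
extended = genome + genome[:n // 2]
windows = {i: pattern_count(symbol, extended[i:i + n // 2]) for i in range(n)}
print(windows)  # {0: 4, 1: 3, 2: 2, 3: 1, 4: 0, 5: 1, 6: 2, 7: 3}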
a1bbf70a211adcee14573534b62b4a4af5abdebd
3,657,558
def crosswalk_patient_id(user): """ Get patient/id from Crosswalk for user """ logger.debug("\ncrosswalk_patient_id User:%s" % user) try: patient = Crosswalk.objects.get(user=user) if patient.fhir_id: return patient.fhir_id except Crosswalk.DoesNotExist: pass return None
1424d5fdc3917d76bd0e8905b44e261068fad4f5
3,657,559
def makeArg(segID: int, N, CA, C, O, geo: ArgGeo) -> Residue:
    """Creates an Arginine residue"""
    ##R-Group
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle

    CB_CG_length = geo.CB_CG_length
    CA_CB_CG_angle = geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle = geo.N_CA_CB_CG_diangle

    CG_CD_length = geo.CG_CD_length
    CB_CG_CD_angle = geo.CB_CG_CD_angle
    CA_CB_CG_CD_diangle = geo.CA_CB_CG_CD_diangle

    CD_NE_length = geo.CD_NE_length
    CG_CD_NE_angle = geo.CG_CD_NE_angle
    CB_CG_CD_NE_diangle = geo.CB_CG_CD_NE_diangle

    NE_CZ_length = geo.NE_CZ_length
    CD_NE_CZ_angle = geo.CD_NE_CZ_angle
    CG_CD_NE_CZ_diangle = geo.CG_CD_NE_CZ_diangle

    CZ_NH1_length = geo.CZ_NH1_length
    NE_CZ_NH1_angle = geo.NE_CZ_NH1_angle
    CD_NE_CZ_NH1_diangle = geo.CD_NE_CZ_NH1_diangle

    CZ_NH2_length = geo.CZ_NH2_length
    NE_CZ_NH2_angle = geo.NE_CZ_NH2_angle
    CD_NE_CZ_NH2_diangle = geo.CD_NE_CZ_NH2_diangle

    carbon_b = calculateCoordinates(
        N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle
    )
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
    carbon_g = calculateCoordinates(
        N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle
    )
    CG = Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    carbon_d = calculateCoordinates(
        CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle
    )
    CD = Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
    nitrogen_e = calculateCoordinates(
        CB, CG, CD, CD_NE_length, CG_CD_NE_angle, CB_CG_CD_NE_diangle
    )
    NE = Atom("NE", nitrogen_e, 0.0, 1.0, " ", " NE", 0, "N")
    carbon_z = calculateCoordinates(
        CG, CD, NE, NE_CZ_length, CD_NE_CZ_angle, CG_CD_NE_CZ_diangle
    )
    CZ = Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C")
    nitrogen_h1 = calculateCoordinates(
        CD, NE, CZ, CZ_NH1_length, NE_CZ_NH1_angle, CD_NE_CZ_NH1_diangle
    )
    NH1 = Atom("NH1", nitrogen_h1, 0.0, 1.0, " ", " NH1", 0, "N")
    nitrogen_h2 = calculateCoordinates(
        CD, NE, CZ, CZ_NH2_length, NE_CZ_NH2_angle, CD_NE_CZ_NH2_diangle
    )
    NH2 = Atom("NH2", nitrogen_h2, 0.0, 1.0, " ", " NH2", 0, "N")

    res = Residue((" ", segID, " "), "ARG", " ")
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(CD)
    res.add(NE)
    res.add(CZ)
    res.add(NH1)
    res.add(NH2)
    return res
4539d48e37e7bacd637300136799b8f7b3dc635d
3,657,560
import json import traceback import tempfile import os import sys def uploadAssignment(req, courseId, assignmentId, archiveFile): """ Saves a temp file of the uploaded archive and calls vmchecker.submit.submit method to put the homework in the testing queue""" websutil.sanityCheckAssignmentId(assignmentId) websutil.sanityCheckCourseId(courseId) # Check permission req.content_type = 'text/html' s = Session.Session(req) if s.is_new(): s.invalidate() return json.dumps({'errorType':websutil.ERR_AUTH, 'errorMessage':"", 'errorTrace':""}) strout = websutil.OutputString() try: s.load() username = s['username'] except: traceback.print_exc(file = strout) return json.dumps({'errorType':websutil.ERR_EXCEPTION, 'errorMessage':"", 'errorTrace':strout.get()}) # Reset the timeout s.save() if not hasattr(archiveFile, "filename") or \ archiveFile.filename == None: return json.dumps({'errorType':websutil.ERR_OTHER, 'errorMessage':"File not uploaded.", 'errorTrace':""}) # Save file in a temp (fd, tmpname) = tempfile.mkstemp('.zip') f = open(tmpname, 'wb', 10000) ## Read the file in chunks for chunk in websutil.fbuffer(archiveFile.file): f.write(chunk) f.close() os.close(fd) # Call submit.py ## Redirect stdout to catch logging messages from submit strout = websutil.OutputString() sys.stdout = strout try: submit.submit(tmpname, assignmentId, username, courseId) update_db.update_grades(courseId, user=username, assignment=assignmentId) except submit.SubmittedTooSoonError: traceback.print_exc(file = strout) return json.dumps({'errorType':websutil.ERR_EXCEPTION, 'errorMessage':"The assignment was submitted too soon", 'errorTrace':strout.get()}) except submit.SubmittedTooLateError: traceback.print_exc(file = strout) return json.dumps({'errorType':websutil.ERR_EXCEPTION, 'errorMessage':"The assignment was submitted too late", 'errorTrace':strout.get()}) except: traceback.print_exc(file = strout) return json.dumps({'errorType':websutil.ERR_EXCEPTION, 'errorMessage':"", 'errorTrace':strout.get()}) return json.dumps({'status':True, 'dumpLog':strout.get(), 'file': tmpname})
03ae93e3d65a84b11115a520555b6b87bc3ec443
3,657,561
def shows_monthly_aggregate_score_heatmap(): """Monthly Aggregate Score Heatmap Graph""" database_connection.reconnect() all_scores = show_scores.retrieve_monthly_aggregate_scores(database_connection) if not all_scores: return render_template("shows/monthly-aggregate-score-heatmap/graph.html", years=None, scores=None) scores_list = [] years = list(all_scores.keys()) for year in all_scores: scores_list.append(list(all_scores[year].values())) return render_template("shows/monthly-aggregate-score-heatmap/graph.html", years=years, scores=scores_list)
4bf26e21c7d76be96395fce43228ee0a80930e4e
3,657,562
import requests


def run(string, entities):
    """Call a URL to create an API repository on GitHub"""

    # db = utils.db()['db']
    # query = utils.db()['query']
    # operations = utils.db()['operations']
    # apikey = utils.config('api_key')
    # playlistid = utils.config('playlist_id')

    # https://developers.google.com/youtube/v3/docs/playlistItems/list
    # url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=' + playlistid + '&key=' + apikey

    nombreapi = ''
    nombredata = ''
    result = ''
    for item in entities:
        if item['entity'] == 'elapi':
            nombreapi = item['sourceText'].lower()
    for item in entities:
        if item['entity'] == 'eldata':
            nombredata = item['sourceText'].lower()

    url = 'https://youtochipizarron.herokuapp.com/' + nombreapi + '_' + nombredata

    utils.output('inter', 'checking', utils.translate('checking', {'website_name': url}))

    # call the url to create a github api branch/repository
    try:
        r = utils.http('GET', url)

        # In case there is a problem like wrong settings
        # if 'error' in r.json():
        #     error = r.json()['error']['errors'][0]
        #     return utils.output('settings_error', 'settings_error', utils.translate('settings_errors', {
        #         'reason': error['reason'],
        #         'message': error['message']
        #     }))

        # items = r.json()['rooms']
        result += utils.translate('list_element', {
                'repository_url': url,
                'repository_name': nombreapi + '_' + nombredata
            }
        )
    except requests.exceptions.RequestException as e:
        return utils.output('request_error', 'request_error', utils.translate('request_errors'))

    # Will synchronize the content (because "end" type) if synchronization enabled
    return utils.output('end', 'success', utils.translate('success', {
        'nuevoapi': nombreapi,
        'nuevodata': nombredata,
        'result': result
    }))
6a3a9899e8081c655e9a7eabc3e96f103a77a6bd
3,657,563
import numpy as np
import scipy.constants as sc


def gamma(surface_potential, temperature):
    """Calculate term from Gouy-Chapman theory.

    Arguments:
        surface_potential: Electrostatic potential at the metal/solution boundary in Volts, e.g. 0.05 [V]
        temperature: Temperature of the solution in Kelvin, e.g. 300 [K]

    Returns:
        float
    """
    # gamma = tanh(e * psi_0 / (4 * k_B * T)); the Boltzmann constant (not the
    # Stefan-Boltzmann constant) appears in the Gouy-Chapman expression.
    product = sc.elementary_charge * surface_potential / (4 * sc.Boltzmann * temperature)
    return np.tanh(product)
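As a numeric sanity check of the Gouy-Chapman term with the docstring's example values (assuming `sc` is `scipy.constants`):

import numpy as np
import scipy.constants as sc

arg = sc.elementary_charge * 0.05 / (4 * sc.Boltzmann * 300)
print(arg, np.tanh(arg))  # roughly 0.48 and 0.45, i.e. gamma(0.05, 300) ~ 0.45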
b8996f01bb221a5cd2f6c222d166a61f1759845f
3,657,564
def calculate_mask(maskimage, masks):
    """Calculate a boolean mask for the image, write it to maskimage and return it."""
    dims = list(maskimage.slices2shape())
    maskdata = np.ones(dims, dtype='bool')
    if masks:
        dataslices = utils.slices2dataslices(maskimage.slices)
        maskdata = utils.string_masks(masks, maskdata, dataslices)
    maskimage.write(data=maskdata, slices=maskimage.slices)
    return maskdata
4935cacb3689b844ab119ec3b24b9e59b7db7ec3
3,657,565
def Range(lo, hi, ctx = None): """Create the range regular expression over two sequences of length 1 >>> range = Range("a","z") >>> print(simplify(InRe("b", range))) True >>> print(simplify(InRe("bb", range))) False """ lo = _coerce_seq(lo, ctx) hi = _coerce_seq(hi, ctx) return ReRef(Z3_mk_re_range(lo.ctx_ref(), lo.ast, hi.ast), lo.ctx)
cb9cf3a334ba8509a54226c86c555257092a0951
3,657,566
import numpy def quantile(data, num_breaks): """ Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """ def _quantiles1D(data,m,p): x = numpy.sort(data.compressed()) n = len(x) if n == 0: return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True) elif n == 1: return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask) aleph = (n*p + m) k = numpy.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] # Initialization & checks --------- data = numpy.ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") # if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = numpy.ma.masked # p = numpy.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p) return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
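Because the helper above is a vendored copy of `scipy.stats.mstats.mquantiles`, recent SciPy versions can reproduce the same breaks directly; this is only a sanity-check sketch of what `quantile(data, num_breaks)` computes:

import numpy as np
from scipy.stats.mstats import mquantiles

data = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3])
num_breaks = 4
breaks = mquantiles(data, prob=np.linspace(1.0 / num_breaks, 1, num_breaks),
                    alphap=0.4, betap=0.4)
print(breaks)  # four class breaks; the last one is the data maximum (9.0)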
24486e39fcefb9e6cf969067836d1793b9f4a7c8
3,657,567
def extract_conformers_from_rdkit_mol_object(mol_obj, conf_ids): """ Generate xyz lists for all the conformers in conf_ids :param mol_obj: Molecule object :param conf_ids: (list) list of conformer ids to convert to xyz :return: (list(list(cgbind.atoms.Atom))) """ conformers = [] for i in range(len(conf_ids)): mol_block_lines = Chem.MolToMolBlock(mol_obj, confId=conf_ids[i]).split('\n') atoms = [] for line in mol_block_lines: split_line = line.split() if len(split_line) == 16: atom_label, x, y, z = split_line[3], split_line[0], split_line[1], split_line[2] atoms.append(Atom(atom_label, float(x), float(y), float(z))) conformer = BaseStruct() conformer.set_atoms(atoms) conformers.append(conformer) if len(conformers) == 0: raise CgbindCritical('Length of conformer xyz list was 0. RDKit failed') return conformers
821977c0be57441b5146c9d5ef02a19320cf5b91
3,657,568
def create_embedding(name: str, env_spec: EnvSpec, *args, **kwargs) -> Embedding: """ Create an embedding to use with sbi. :param name: identifier of the embedding :param env_spec: environment specification :param args: positional arguments forwarded to the embedding's constructor :param kwargs: keyword arguments forwarded to the embedding's constructor :return: embedding instance """ if name == LastStepEmbedding.name: embedding = LastStepEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == DeltaStepsEmbedding.name: embedding = DeltaStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == BayesSimEmbedding.name: embedding = BayesSimEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == DynamicTimeWarpingEmbedding.name: embedding = DynamicTimeWarpingEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == RNNEmbedding.name: embedding = RNNEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == AllStepsEmbedding.name: embedding = AllStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) else: raise pyrado.ValueErr( given_name=name, eq_constraint=f"{LastStepEmbedding.name}, {DeltaStepsEmbedding.name}, {BayesSimEmbedding.name}, " f"{DynamicTimeWarpingEmbedding.name}, or {RNNEmbedding.name}", ) return embedding
70f4651f5815f008670de08805249d0b9dfc39e9
3,657,569
def _init_allreduce_operators(length, split_indices): """ initialize allreduce communication operators""" indices = split_indices[0] fusion = split_indices[1] op_list = () j = 0 for i in range(length): if j <= len(indices)-1: temp = indices[j] else: temp = length if i >= temp: j = j + 1 fusion = fusion + 1 op = AllReduce('sum', GlobalComm.WORLD_COMM_GROUP) op.add_prim_attr('fusion', fusion) op_list = op_list + (op,) return op_list
91f752e049394b27340553830dce70074ef7ed81
3,657,570
def get_valid_fields(val: int, cs: dict) -> set: """ A value is valid if there's at least one field's interval which contains it. """ return { field for field, intervals in cs.items() if any(map(lambda i: i[0] <= val <= i[1], intervals)) }
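A small illustration of the constraints structure the function expects (a mapping from field name to a list of inclusive (lo, hi) intervals), assuming the function above is in scope:

constraints = {
    "row": [(6, 11), (33, 44)],
    "seat": [(13, 40), (45, 50)],
}
print(get_valid_fields(7, constraints))   # {'row'}
print(get_valid_fields(40, constraints))  # both 'row' and 'seat' accept 40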
3016e78637374eadf7d0e2029d060538fea86377
3,657,571
import glob
import re

import h5py
import numpy as np
from sklearn.model_selection import train_test_split


def load_data_multiview(_path_features, _path_lables, coords, joints, cycles=3, test_size=0.1):
    """Generate multi-view train/test data from gait cycles.

    Args:
        _path_features (str): Path to gait sequence file
        _path_lables (str): Path to labels of corresponding gait sequence
        coords (int): Number of co-ordinates representing each joint in gait cycle
        joints (int): Number of joints in the gait sequence
        cycles (int, optional): Time duration of gait cycle. Defaults to 3.
        test_size (float, optional): Ratio of test data. Defaults to 0.1.

    Returns:
        [list]: train and test data
    """
    feature_files = glob.glob(_path_features)
    label_files = glob.glob(_path_lables)
    print(f'---> Number of files = {len(feature_files)}')
    # sorting files so that features and labels files match
    feature_files.sort()
    label_files.sort()

    angle_regex = re.compile(r'(\d*).h5')
    folder_regex = re.compile(r'(\w*)\/')

    all_data_train = []
    all_data_test = []
    all_labels_train = []
    all_labels_test = []
    all_angles_train = []
    all_angles_test = []
    for feature_file, label_file in zip(feature_files, label_files):
        ff = h5py.File(feature_file, 'r')
        fl = h5py.File(label_file, 'r')

        angle = int(angle_regex.search(feature_file).group(1))
        folder = folder_regex.findall(feature_file)[-1]
        print(f"--->> processing - {folder} - {angle}")

        data_list = []
        num_samples = len(ff.keys())
        time_steps = 0
        labels = np.empty(num_samples)
        for si in range(num_samples):
            ff_group_key = list(ff.keys())[si]
            data_list.append(list(ff[ff_group_key]))  # Get the data
            time_steps_curr = len(ff[ff_group_key])
            if time_steps_curr > time_steps:
                time_steps = time_steps_curr
            labels[si] = fl[list(fl.keys())[si]][()]

        data = np.empty((num_samples, time_steps*cycles, joints*coords))
        for si in range(num_samples):
            data_list_curr = np.tile(
                data_list[si], (int(np.ceil(time_steps / len(data_list[si]))), 1))
            for ci in range(cycles):
                data[si, time_steps * ci:time_steps * (ci + 1), :] = data_list_curr[0:time_steps]

        data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=test_size)
        all_data_train.extend(data_train)
        all_data_test.extend(data_test)
        all_labels_train.extend(labels_train)
        all_labels_test.extend(labels_test)
        all_angles_train.extend([angle]*len(labels_train))
        all_angles_test.extend([angle]*len(labels_test))

    return data, labels, \
        all_data_train, all_labels_train, \
        all_data_test, all_labels_test, \
        all_angles_train, all_angles_test
574ca69bf6a6637b4ca53de05f8e792844e134bb
3,657,572
def T_ncdm(omega_ncdm, m_ncdm): # RELICS ONLY? """Returns T_ncdm as a function of omega_ncdm, m_ncdm. omega_ncdm : relative relic abundance. Unitless. m_ncdm : relic mass in units [eV]. T_ncdm : relic temperature in units [K] """ T_ncdm = (np.power( cf.NEUTRINO_SCALE_FACTOR * omega_ncdm / m_ncdm, 1./3.) * cf.RELIC_TEMP_SCALE) return T_ncdm
c3db4e4d2ac226f12afca3077bbc3436bd7a0459
3,657,573
import logging from datetime import datetime import subprocess def main(config: Config, dry_run: bool = False) -> int: """ Main entrypoint into the program. Takes specified snapshots if they don't exist and deletes old entrys as specified. :param config: The backup manager configuration. :param dry_run: Flag to indicate that no commands should be run :return: 0 on success, non-zero on failure """ zfs_path = which("zfs") if zfs_path is None: logging.critical("zfs command cannot be found") return 2 try: dataset_configs = get_dataset_configs(config) except RuntimeError as exc: logging.critical(exc) return 3 logging.debug( "Parsed dataset configs: \n\t%s", "\n\t".join((dumps(config) for config in dataset_configs)), ) today = datetime.now().date() for dataset_config in dataset_configs: if not ( dataset_config["keep_days"] > 0 or (dataset_config["keep_weeks"] > 0 and today.isoweekday() == dataset_config["dow"]) or (dataset_config["keep_months"] > 0 and today.day == dataset_config["dom"]) ): logging.debug("No snapshot scheduled for dataset %s", dataset_config["name"]) continue today_snapshot_name = "{}@{}{}".format( dataset_config["name"], config.get("snapshot_prefix", ""), today.strftime("%Y%m%d") ) if today in get_sorted_snapshots(config)[dataset_config["name"]]: logging.warning("Snapshot %s already exists", today_snapshot_name) continue cmd = ["zfs", "snapshot", today_snapshot_name] if dataset_config["recursive"]: cmd.insert(2, "-r") logging.info("Creating snapshot %s", today_snapshot_name) logging.debug("Running command: %s", cmd) if not dry_run: try: subprocess.check_output(cmd, stderr=subprocess.PIPE, encoding="utf-8") except subprocess.CalledProcessError as exc: logging.error("zfs command failed with error: %s", exc.stderr) # Cleanup snapshots dataset_snapshots = get_sorted_snapshots(config)[dataset_config["name"]] keep_daily_set = set(dataset_snapshots[: dataset_config["keep_days"]]) keep_weekly_set = set( [snapshot for snapshot in dataset_snapshots if snapshot.isoweekday() == dataset_config["dow"]][ : dataset_config["keep_weeks"] ] ) keep_monthly_set = set( [snapshot for snapshot in dataset_snapshots if snapshot.day == dataset_config["dom"]][ : dataset_config["keep_months"] ] ) keep_set = keep_daily_set | keep_weekly_set | keep_monthly_set for snapshot in set(dataset_snapshots) - keep_set: delete_snapshot_name = "{}@{}{}".format( dataset_config["name"], config.get("snapshot_prefix", ""), snapshot.strftime("%Y%m%d") ) cmd = [ "zfs", "destroy", delete_snapshot_name, ] if dataset_config["recursive"]: cmd.insert(2, "-r") logging.info("Destroying snapshot %s", delete_snapshot_name) logging.debug("Running command: %s", cmd) if not dry_run: try: subprocess.check_output(cmd, stderr=subprocess.PIPE, encoding="utf-8") except subprocess.CalledProcessError as exc: logging.error("zfs command failed with error: %s", exc.stderr) return 0
f3cf6967458c082f78cd47a4d5793a1fa8e130a2
3,657,574
import binascii def generate_initialisation_vector(): """Generates an initialisation vector for encryption.""" initialisation_vector = Random.new().read(AES.block_size) return (initialisation_vector, int(binascii.hexlify(initialisation_vector), 16))
4c05067d86cbf32de7f07b5d7483811c46307b64
3,657,575
def assign_score(relevant_set): """Assign score to each relevant element in descending order and return the score list.""" section = len(relevance[0])//3 score = [] s = 3 for i in range(3): if s == 1: num = len(relevance[0]) - len(score) score.extend([s]*num) else: score.extend([s]*section) s -= 1 return score
76a43780e1d1f37f7e0220ff0a0ca2ec484dd036
3,657,576
def visualize_img(img,
                  cam,
                  kp_pred,
                  vert,
                  renderer,
                  kp_gt=None,
                  text=None,
                  rotated_view=False,
                  mesh_color='blue',
                  pad_vals=None,
                  no_text=False):
    """
    Visualizes the image with the ground truth keypoints and
    predicted keypoints on left and image with mesh on right.

    Keypoints should be in normalized coordinates, not image coordinates.

    Args:
        img: Image.
        cam (3x1): Camera parameters.
        kp_gt: Ground truth keypoints.
        kp_pred: Predicted keypoints.
        vert: Vertices.
        renderer: SMPL renderer.
        text (dict): Optional information to include in the image.
        rotated_view (bool): If True, also visualizes mesh from another angle.
        if pad_vals (2,) is not None, removes those values from the image
            (undo img pad to make square)

    Returns:
        Combined image.
    """
    img_size = img.shape[0]
    # Default to a fresh dict per call (a mutable default argument would be shared between calls).
    if text is None:
        text = {}
    text.update({'sc': cam[0], 'tx': cam[1], 'ty': cam[2]})
    if kp_gt is not None:
        gt_vis = kp_gt[:, 2].astype(bool)
        loss = np.sum((kp_gt[gt_vis, :2] - kp_pred[gt_vis])**2)
        text['kpl'] = loss

    # Undo pre-processing.
    # Make sure img is [0-255]
    input_img = ((img + 1) * 0.5) * 255.

    rend_img = renderer(vert, cam=cam, img=input_img, color_name=mesh_color)
    if not no_text:
        rend_img = vis_util.draw_text(rend_img, text)

    # Draw skeletons
    pred_joint = ((kp_pred + 1) * 0.5) * img_size
    skel_img = vis_util.draw_skeleton(input_img, pred_joint)
    if kp_gt is not None:
        gt_joint = ((kp_gt[:, :2] + 1) * 0.5) * img_size
        skel_img = vis_util.draw_skeleton(
            skel_img, gt_joint, draw_edges=False, vis=gt_vis)

    if pad_vals is not None:
        skel_img = remove_pads(skel_img, pad_vals)
        rend_img = remove_pads(rend_img, pad_vals)

    if rotated_view:
        rot_img = renderer.rotated(
            vert, 90, cam=cam, alpha=False, color_name=mesh_color)
        if pad_vals is not None:
            rot_img = remove_pads(rot_img, pad_vals)

        return skel_img / 255, rend_img / 255, rot_img / 255

    else:
        return skel_img / 255, rend_img / 255
eb182cdd4042595abfba3c399c20fd5bba0ca352
3,657,577
import os def _check_file_type_specific_bad_pattern(filepath, content): """Check the file content based on the file's extension. Args: filepath: str. Path of the file. content: str. Contents of the file. Returns: failed: bool. True if there is bad pattern else false. total_error_count: int. The number of errors. """ _, extension = os.path.splitext(filepath) pattern = BAD_PATTERNS_MAP.get(extension) failed = False total_error_count = 0 if pattern: for regexp in pattern: if _check_bad_pattern_in_file(filepath, content, regexp): failed = True total_error_count += 1 return failed, total_error_count
fe8817f77f5596d8c51173ab3dc48ef8c02f8bcb
3,657,578
import socket
import sys

import requests


def _update(__version__, __code_name__, language, socks_proxy):
    """
    update the framework

    Args:
        __version__: version number
        __code_name__: code name
        language: language
        socks_proxy: socks proxy

    Returns:
        True if success otherwise None
    """
    try:
        if socks_proxy is not None:
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            socks.set_default_proxy(socks_version, str(
                socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
        data = requests.get(
            url, headers={"User-Agent": "OWASP Nettacker"}).content
        if sys.version_info[0] == 3:
            data = data.decode("utf-8")
        if __version__ + ' ' + __code_name__ == data.rsplit('\n')[0]:
            info(messages(language, "last_version"))
        else:
            warn(messages(language, "not_last_version"))
            warn(messages(language, "feature_unavailable"))
    except:
        warn(messages(language, "cannot_update"))
    return True
a35b9115e4c123aa771de238cac576a1df8532c1
3,657,579
import numpy as np
import scipy.interpolate


def conv_noncart_to_cart(points, values, xrange, yrange, zrange):
    """
    :param points: Data point locations (non-cartesian system)
    :param values: Values corresponding to each data point
    :param xrange: Range of x values to include on output cartesian grid
    :param yrange: y
    :param zrange: z
    :return: 3d array with sides (xrange, yrange, zrange) of values
    """
    # Get all points on cartesian grid specified
    xv, yv, zv = np.meshgrid(xrange, yrange, zrange)
    print(xv)
    print(yv)
    print(zv)

    # Determine interpolated values of points on the cartesian grid
    valarray = scipy.interpolate.griddata(points=points, values=values, xi=(xv, yv, zv), method="linear")

    # Returns 3D array of vals on cartesian grid
    return valarray
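A lower-dimensional sketch of the same `scipy.interpolate.griddata` call, with scattered samples of f(x, y) = x + y interpolated onto a regular grid:

import numpy as np
from scipy.interpolate import griddata

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
vals = pts[:, 0] + pts[:, 1]
xv, yv = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 3))
print(griddata(points=pts, values=vals, xi=(xv, yv), method="linear"))
# linear interpolation recovers x + y exactly on the 3x3 grid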
cba013444ecdbd4abec14008dc6894e306244087
3,657,580
import urllib import json def createColumnsFromJson(json_file, defaultMaximumSize=250): """Create a list of Synapse Table Columns from a Synapse annotations JSON file. This creates a list of columns; if the column is a 'STRING' and defaultMaximumSize is specified, change the default maximum size for that column. """ f = urllib.urlopen(path2url(json_file)) data = json.load(f) cols = [] for d in data: d['enumValues'] = [a['value'] for a in d['enumValues']] if d['columnType'] == 'STRING' and defaultMaximumSize: d['maximumSize'] = defaultMaximumSize cols.append(synapseclient.Column(**d)) return cols
9eba1d44a9fec8e92b6b95036821d48d68cd991b
3,657,581
from typing import Callable
from typing import Optional
from typing import TypeVar
from functools import wraps
import warnings

# Return type of the decorated callable.
T = TypeVar("T")


def record(
    fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
) -> Callable[..., T]:
    """
    Syntactic sugar to record errors/exceptions that happened in the decorated
    function using the provided ``error_handler``.

    Using this decorator is equivalent to:

    ::

     error_handler = get_error_handler()
     error_handler.initialize()
     try:
        foobar()
     except ChildFailedError as e:
        _, failure = e.get_first_failure()
        error_handler.dump_error_file(failure.error_file, failure.exitcode)
        raise
     except Exception as e:
        error_handler.record(e)
        raise

    .. important:: use this decorator once per process at the top level method,
                   typically this is the main method.

    Example

    ::

     @record
     def main():
         pass

     if __name__=="__main__":
        main()

    """
    if not error_handler:
        error_handler = get_error_handler()

    def wrap(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            assert error_handler is not None  # assertion for mypy type checker
            error_handler.initialize()
            try:
                return f(*args, **kwargs)
            except ChildFailedError as e:
                rank, failure = e.get_first_failure()
                if failure.error_file != _NOT_AVAILABLE:
                    error_handler.dump_error_file(failure.error_file, failure.exitcode)
                else:
                    warnings.warn(_no_error_file_warning_msg(rank, failure))
                raise
            except Exception as e:
                error_handler.record_exception(e)
                raise

        return wrapper

    return wrap(fn)
e538c51aeb4234aa85d90d9978d228bf0f505aac
3,657,582
import torch def bbox_overlaps_2D(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1, boxes2: [N, (y1, x1, y2, x2)]. """ # 1. Tile boxes2 and repeate boxes1. This allows us to compare # every boxes1 against every boxes2 without loops. # TF doesn't have an equivalent to np.repeate() so simulate it # using tf.tile() and tf.reshape. boxes1_repeat = boxes2.size()[0] boxes2_repeat = boxes1.size()[0] boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4) boxes2 = boxes2.repeat(boxes2_repeat,1) # 2. Compute intersections b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1) b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1) y1 = torch.max(b1_y1, b2_y1)[:, 0] x1 = torch.max(b1_x1, b2_x1)[:, 0] y2 = torch.min(b1_y2, b2_y2)[:, 0] x2 = torch.min(b1_x2, b2_x2)[:, 0] zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False) if y1.is_cuda: zeros = zeros.cuda() intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros) # 3. Compute unions b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) union = b1_area[:,0] + b2_area[:,0] - intersection # 4. Compute IoU and reshape to [boxes1, boxes2] iou = intersection / union overlaps = iou.view(boxes2_repeat, boxes1_repeat) return overlaps
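A hand-computed IoU for a single pair of boxes (in the same (y1, x1, y2, x2) order) is a useful sanity check against the tensor version above; plain NumPy, no autograd:

import numpy as np

b1 = np.array([0.0, 0.0, 2.0, 2.0])
b2 = np.array([1.0, 1.0, 3.0, 3.0])
inter = max(0.0, min(b1[2], b2[2]) - max(b1[0], b2[0])) * \
        max(0.0, min(b1[3], b2[3]) - max(b1[1], b2[1]))
union = (b1[2] - b1[0]) * (b1[3] - b1[1]) + (b2[2] - b2[0]) * (b2[3] - b2[1]) - inter
print(inter / union)  # 1/7 ~ 0.143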
86920dac357285b3681629474ed5aaad471ed7f8
3,657,583
def decoder(data): """ This generator processes a sequence of bytes in Modified UTF-8 encoding and produces a sequence of unicode string characters. It takes bits from the byte until it matches one of the known encoding sequences. It uses ``DecodeMap`` to mask, compare and generate values. :param data: a string of bytes in Modified UTF-8 encoding. :return: a generator producing a string of unicode characters :raises UnicodeDecodeError: unrecognised byte in sequence encountered. """ def next_byte(_it, start, count): try: return next(_it)[1] except StopIteration: raise UnicodeDecodeError( NAME, data, start, start + count, "incomplete byte sequence" ) it = iter(enumerate(data)) for i, d in it: if d == 0x00: # 00000000 raise UnicodeDecodeError( NAME, data, i, i + 1, "embedded zero-byte not allowed" ) if d & 0x80: # 1xxxxxxx if d & 0x40: # 11xxxxxx if d & 0x20: # 111xxxxx if d & 0x10: # 1111xxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "invalid encoding character" ) if d == 0xED: value = 0 for i1, dm in enumerate(DECODE_MAP[6]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 1110xxxx value = d & 0x0F for i1, dm in enumerate(DECODE_MAP[3]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 110xxxxx value = d & 0x1F for i1, dm in enumerate(DECODE_MAP[2]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 10xxxxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "misplaced continuation character" ) else: # 0xxxxxxx value = d # noinspection PyCompatibility yield mutf8_unichr(value)
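The two places where this decoder departs from standard UTF-8 are the 0xC0 0x80 encoding of U+0000 and the six-byte surrogate-pair sequences (starting with 0xED) for supplementary characters; Python's strict built-in codec rejects the former, which is what motivates a hand-rolled decoder:

try:
    b"\xc0\x80".decode("utf-8")
except UnicodeDecodeError as exc:
    print(exc)  # strict UTF-8 refuses the Modified UTF-8 encoding of U+0000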
217f52081a476ef1c48d2d34d020ec6c7c9e1989
3,657,584
def calculate_exvolume_redfactor(): """ Calculates DEER background reduction factor alpha(d) See Kattnig et al J.Phys. Chem. B, 117, 16542 (2013) https://doi.org/10.1021/jp408338q The background reduction factor alpha(d) is defined in Eq.(18) For large d, one can use the limiting expression alpha = (3/2/pi)*(2*pi/3-sqrt(3)./d) as an excellent approximation (error at d """ def KK(d): q = np.sqrt(6*d/pi) S,C = scp.special.fresnel(q) y = 1 - (np.cos(d)*C+np.sin(d)*S)/q y[y==0] = 0 return y def h(d): d = np.atleast_1d(d) y = np.zeros(np.shape(d)) for k in range(len(d)): y[k],_ = scp.integrate.quad(lambda x:(1-x**2)*Si((1-x**2)*d[k]),0,np.sqrt(3)) return y def Si(t): t = np.atleast_1d(t) y = np.zeros(np.shape(t)) for k in range(len(t)): y[k],_ = scp.integrate.quad(lambda x:np.sin(x)/(x+np.finfo(float).eps),0,t[k],limit=1000) y[y==0] = 0 return y # Set up dR range #------------------------------------------------------------------------------- # dR = A*t/R^3, where t is time, R is excluded-volume radius, and A is the # dipolar constant (in units compatible with t and R) dRlin = np.arange(0,20,0.05) dRlog = 10**(np.arange(1,3,0.05)) dRlog = np.delete(dRlog, np.where(dRlog < max(dRlin))) dR = np.concatenate((dRlin, dRlog)) # Evaluate reduction factor alpha as a function of dR #------------------------------------------------------------------------------- h_ = h(dR) K_ = KK(dR) alpha = (3/2/pi)*(h_ - np.sqrt(3)/dR*K_) alpha[dR==0] = 0 return alpha
3583e1526f1636feaa86a77c7f5ba51d816abe26
3,657,585
def get_successors(graph): """Returns a dict of all successors of each node.""" d = {} for e in graph.get_edge_list(): src = e.get_source() dst = e.get_destination() if src in d.keys(): d[src].add(dst) else: d[src] = set([dst]) return d
1ec7b0ab8772dc738758bb14fe4abd5dd4b9074e
3,657,586
def readDataTable2o2(request):
    """Vuetify practice"""
    form1Textarea1 = request.POST["textarea1"]

    template = loader.get_template(
        'webapp1/practice/vuetify-data-table2.html')
    #    -----------------------------------------
    #    1
    # 1. Fetches host1/webapp1/templates/webapp1/practice/vuetify-data-table2.html.
    #    -----------------------------------------
    context = {
        'dessertsJson': form1Textarea1
    }
    return HttpResponse(template.render(context, request))
01688c20fa5057829338bbd76520a7b0510923ad
3,657,587
from .sigma import decode_cf_sigma from .grid import decode_cf_dz2depth def get_depth(da, errors="raise"): """Get or compute the depth coordinate If a depth variable cannot be found, it tries to compute either from sigma-like coordinates or from layer thinknesses. Parameters ---------- {errors} Return ------ xarray.DataArray or None See also -------- get_lon get_lat get_time get_altitude get_level get_vertical xoa.cf.CFSpecs.search_coord xoa.sigma.decode_cf_sigma xoa.grid.decode_cf_dz2depth """ cfspecs = xcf.get_cf_specs(da) errors = misc.ERRORS[errors] ztype = cfspecs["vertical"]["type"] # From variable depth = cfspecs.search(da, 'depth', errors="ignore") if depth is not None: return depth if ztype == "z" or not hasattr(da, "data_vars"): # explicitly msg = "No depth coordinate found" if errors == "raise": raise XoaError(msg) xoa_warn(msg) return # Decode the dataset if ztype == "sigma" or ztype is None: err = "ignore" if ztype is None else errors da = decode_cf_sigma(da, errors=err) if "depth" in da: return da.depth if ztype == "dz2depth" or ztype is None: err = "ignore" if ztype is None else errors da = decode_cf_dz2depth(da, errors=err) if "depth" in da: return da.depth msg = "Can't infer depth coordinate from dataset" if errors == "raise": raise XoaError(msg) xoa_warn(msg)
048208629eef6e5ecf238212e7a865e5fbaea993
3,657,588
def route53_scan(assets, record_value, record): """ Scan Route53 """ for i, asset in enumerate(assets): asset_type = asset.get_type() if asset_type == 'EC2' and record_value in (asset.public_ip, asset.private_ip): assets[i].dns_record = record['Name'].replace('\\052', '*') elif asset_type == 'ELBV2' and record_value == f'{asset.name}.': assets[i].dns_record = record['Name'].replace('\\052', '*') return assets
eccbb2d716ef7b5dd713e7fbbd210c246c97347d
3,657,589
import requests


def process_language(text):
    """
    Fetch from language processing API (cloud function)
    :param text:
    :return:
    """
    # The language processing seems to fail without ascii decoding, i.e. removing emoji and Chinese characters
    request = {
        "text": text.encode("ascii", errors="ignore").decode()
    }
    response = requests.post(LANGUAGE_PROCESSOR_API, json=request)
    if response.status_code == 500:
        print(f"Language processing error {response}")
        return {}
    else:
        return response.json()
d5b164cf0722093988f7cbb3f93ef62bc7c98758
3,657,590
def to_int(text): """Text to integer.""" try: return int(text) except ValueError: return ''
d870ee05c3117111adcf85c91038b19beaf9585b
3,657,591
import os
import json


def parse_commonsense_reasoning_test(test_data_name):
    """Read JSON test data."""
    with tf.gfile.Open(os.path.join(
        FLAGS.data_dir, 'commonsense_test',
        '{}.json'.format(test_data_name)), 'r') as f:
        data = json.load(f)

    question_ids = [d['question_id'] for d in data]
    # `tokenize` refers to a tokenizer helper defined elsewhere in the project, not the stdlib module.
    sentences = [tokenize(d['substitution']) for d in data]
    labels = [d['correctness'] for d in data]
    return question_ids, sentences, labels
b3d83a93ecece3813a558dfc7c5eb7757e153974
3,657,592
def flooding(loss, b): """flooding loss """ return (loss - b).abs() + b
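A quick check of the flooding behaviour described in Ishida et al. (2020), where the training loss is reflected so it never drops below the flood level b; torch is assumed here, matching the tensor-style .abs() above, with `flooding` being the function just defined:

import torch

b = 0.05
print(flooding(torch.tensor(0.02), b))  # tensor(0.0800): below the flood level, pushed back up
print(flooding(torch.tensor(0.30), b))  # tensor(0.3000): above the flood level, unchanged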
c34eedf0421b60e27bd813381ff7dfe96a3912eb
3,657,593
def CreateConditions(p,avec,bvec,indexgenerator=CreateLyndonIndices): """This creates the set of equations using by default the Lyndon Basis elements. Parameters ---------- p : the considered order avec: The set of symbols to use for the first operator. bvec: The set of symbols to use for the second operator. indexgenerator: (optional) by default we use indexgenerator for the Lyndon indices. Using CreateMuVectors the indices from the overcomplete Hall-Basis can be used. Returns ------- array : An array of Equations that have to be satisfied to fulfill the requested order p. """ cvec=[*accumulate(avec)] cvec[-1]=1 retval = [Eq(sum(avec),1)] for k in range(1,p+1): vecs=indexgenerator(p,k) for mu in vecs: retval.append(Eq(CreateEquation(mu,bvec,cvec),0)) return retval
61ed4373d18a730838110865c8d4334176427bc4
3,657,594
def with_conf_blddir(conf, name, body, func): """'Context manager' to execute a series of tasks into code-specific build directory. func must be a callable taking no arguments """ old_root, new_root = create_conf_blddir(conf, name, body) try: conf.bld_root = new_root conf.bld_root.ctx.bldnode = new_root return func() finally: conf.bld_root = old_root conf.bld_root.ctx.bldnode = old_root
b01af0d8a44ad432020cc800f334f4de50b5036d
3,657,595
def many_to_one(clsname, **kw): """Use an event to build a many-to-one relationship on a class. This makes use of the :meth:`.References._reference_table` method to generate a full foreign key relationship to the remote table. """ @declared_attr def m2o(cls): cls._references((cls.__name__, clsname)) return relationship(clsname, **kw) return m2o
528f6391535a437383750346318ac65acaa8dfdc
3,657,596
import sqlite3


def get_cnx(dbname=None, write=False):
    """Return a new connection to the database by the given name.
    If 'dbname' is None, return a connection to the system database.
    If the database file does not exist, it will be created.
    The OS-level file permissions are set in DbSaver.
    """
    if dbname is None:
        dbname = constants.SYSTEM
    dbpath = get_dbpath(dbname)
    if write:
        cnx = sqlite3.connect(dbpath)
    else:
        # Open read-only via SQLite's URI filename syntax.
        path = f"file:{dbpath}?mode=ro"
        cnx = sqlite3.connect(path, uri=True)
    cnx.row_factory = sqlite3.Row
    return cnx
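A standalone sketch of the read-only branch (hypothetical file name); SQLite refuses writes on a connection opened with mode=ro:

import sqlite3

seed = sqlite3.connect("example.db")
seed.execute("CREATE TABLE IF NOT EXISTS t (x INTEGER)")
seed.commit()
seed.close()

cnx = sqlite3.connect("file:example.db?mode=ro", uri=True)
cnx.row_factory = sqlite3.Row
try:
    cnx.execute("INSERT INTO t VALUES (1)")
except sqlite3.OperationalError as exc:
    print(exc)  # attempt to write a readonly database
finally:
    cnx.close()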
f2e3cc300fa4cb122a9fe4705d41878332929702
3,657,597
def nir_mean(msarr, nir_band=7):
    """
    Calculate the mean of the (unmasked) values of the NIR (near infrared)
    band of an image array. The default `nir_band` value of 7 selects the
    NIR2 band in WorldView-2 imagery. If you're working with a different type
    of imagery, you will need to figure out the appropriate value to use
    instead.

    Parameters
    ----------
    msarr : numpy array (RxCxBands shape)
        The multispectral image array. See `OpticalRS.RasterDS` for more
        info.
    nir_band : int (Default value = 7)
        The default `nir_band` value of 7 selects the NIR2 band in
        WorldView-2 imagery. If you're working with a different type of
        imagery, you will need to figure out the appropriate value to use
        instead. This is a zero indexed number (the first band is 0, not 1).

    Returns
    -------
    float
        The mean radiance in the NIR band.
    """
    return msarr[..., nir_band].mean()
7ba6ea8b7d51b8942a0597f2f89a05ecbee9f46e
3,657,598
def decode(invoice) -> LightningInvoice: """ @invoice: is a str, bolt11. """ client = CreateLightningClient() try: decode_response = client.call("decode", invoice) assert decode_response.get("error") is None result = decode_response["result"] assert result["valid"], "decode is invalid" invoice = LightningInvoice() invoice.msatoshi = result["msatoshi"] invoice.description: str = result["description"] return invoice finally: client.close()
c713ec8708214312b84103bceb64e0876d23bc29
3,657,599