Dataset schema: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M). Each row below is listed as content, sha1, id.
def lh_fus(temp):
    """latent heat of fusion

    Args:
        temp (float or array): temperature [K]

    Returns:
        float or array: latent heat of fusion
    """
    return 3.336e5 + 1.6667e2 * (FREEZE - temp)
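A minimal usage sketch for the snippet above. `FREEZE` is a module-level constant the function assumes but does not define; the value below (the freezing point of water in kelvin) is a hypothetical stand-in:

import numpy as np

FREEZE = 273.16  # hypothetical stand-in for the module constant [K]

print(lh_fus(FREEZE))                      # 333600.0 J/kg at the freezing point
print(lh_fus(np.array([263.16, 273.16])))  # [335267. 333600.]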
8127970612b031d2aaf7598379f41b549a3268e1
3,649,620
def to_eaf(file_path, eaf_obj, pretty=True):
    """
    Modified function from https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py

    Write an Eaf object to file.

    :param str file_path: Filepath to write to, - for stdout.
    :param pympi.Elan.Eaf eaf_obj: Object to write.
    :param bool pretty: Flag to set pretty printing.
    """
    def rm_none(x):
        try:
            # Ugly hack to test if s is a string in py3 and py2
            basestring

            def isstr(s):
                return isinstance(s, basestring)
        except NameError:
            def isstr(s):
                return isinstance(s, str)
        return {k: v if isstr(v) else str(v)
                for k, v in x.items() if v is not None}

    # Annotation Document
    ADOCUMENT = etree.Element('ANNOTATION_DOCUMENT', eaf_obj.adocument)
    # Licenses
    for m in eaf_obj.licenses:
        n = etree.SubElement(ADOCUMENT, 'LICENSE', {'LICENSE_URL': m[1]})
        n.text = m[0]
    # Header
    HEADER = etree.SubElement(ADOCUMENT, 'HEADER', eaf_obj.header)
    # Media descriptors
    for m in eaf_obj.media_descriptors:
        etree.SubElement(HEADER, 'MEDIA_DESCRIPTOR', rm_none(m))
    # Linked file descriptors
    for m in eaf_obj.linked_file_descriptors:
        etree.SubElement(HEADER, 'LINKED_FILE_DESCRIPTOR', rm_none(m))
    # Properties
    for k, v in eaf_obj.properties:
        etree.SubElement(HEADER, 'PROPERTY', {'NAME': k}).text = str(v)
    # Time order
    TIME_ORDER = etree.SubElement(ADOCUMENT, 'TIME_ORDER')
    for t in sorted(eaf_obj.timeslots.items(), key=lambda x: int(x[0][2:])):
        etree.SubElement(TIME_ORDER, 'TIME_SLOT', rm_none(
            {'TIME_SLOT_ID': t[0], 'TIME_VALUE': t[1]}))
    # Tiers
    for t in sorted(eaf_obj.tiers.items(), key=lambda x: x[1][3]):
        tier = etree.SubElement(ADOCUMENT, 'TIER', rm_none(t[1][2]))
        for a in t[1][0].items():
            ann = etree.SubElement(tier, 'ANNOTATION')
            alan = etree.SubElement(ann, 'ALIGNABLE_ANNOTATION', rm_none(
                {'ANNOTATION_ID': a[0], 'TIME_SLOT_REF1': a[1][0],
                 'TIME_SLOT_REF2': a[1][1], 'SVG_REF': a[1][3]}))
            etree.SubElement(alan, 'ANNOTATION_VALUE').text = a[1][2]
        for a in t[1][1].items():
            ann = etree.SubElement(tier, 'ANNOTATION')
            rean = etree.SubElement(ann, 'REF_ANNOTATION', rm_none(
                {'ANNOTATION_ID': a[0], 'ANNOTATION_REF': a[1][0],
                 'PREVIOUS_ANNOTATION': a[1][2], 'SVG_REF': a[1][3]}))
            etree.SubElement(rean, 'ANNOTATION_VALUE').text = a[1][1]
    # Linguistic types
    for l in eaf_obj.linguistic_types.values():
        etree.SubElement(ADOCUMENT, 'LINGUISTIC_TYPE', rm_none(l))
    # Locales
    for lc, (cc, vr) in eaf_obj.locales.items():
        etree.SubElement(ADOCUMENT, 'LOCALE', rm_none(
            {'LANGUAGE_CODE': lc, 'COUNTRY_CODE': cc, 'VARIANT': vr}))
    # Languages
    for lid, (ldef, label) in eaf_obj.languages.items():
        etree.SubElement(ADOCUMENT, 'LANGUAGE', rm_none(
            {'LANG_ID': lid, 'LANG_DEF': ldef, 'LANG_LABEL': label}))
    # Constraints
    for l in eaf_obj.constraints.items():
        etree.SubElement(ADOCUMENT, 'CONSTRAINT', rm_none(
            {'STEREOTYPE': l[0], 'DESCRIPTION': l[1]}))
    # Controlled vocabularies
    for cvid, (descriptions, cv_entries, ext_ref) in \
            eaf_obj.controlled_vocabularies.items():
        cv = etree.SubElement(ADOCUMENT, 'CONTROLLED_VOCABULARY',
                              rm_none({'CV_ID': cvid, 'EXT_REF': ext_ref}))
        for lang_ref, description in descriptions:
            des = etree.SubElement(cv, 'DESCRIPTION', {'LANG_REF': lang_ref})
            if description:
                des.text = description
        for cveid, (values, ext_ref) in cv_entries.items():
            cem = etree.SubElement(cv, 'CV_ENTRY_ML', rm_none({
                'CVE_ID': cveid, 'EXT_REF': ext_ref}))
            for value, lang_ref, description in values:
                val = etree.SubElement(cem, 'CVE_VALUE', rm_none({
                    'LANG_REF': lang_ref, 'DESCRIPTION': description}))
                val.text = value
    # Lexicon refs
    for l in eaf_obj.lexicon_refs.values():
        etree.SubElement(ADOCUMENT, 'LEXICON_REF', rm_none(l))
    # External refs
    for eid, (etype, value) in eaf_obj.external_refs.items():
        etree.SubElement(ADOCUMENT, 'EXTERNAL_REF', rm_none(
            {'EXT_REF_ID': eid, 'TYPE': etype, 'VALUE': value}))
    # https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py
    return ('<?xml version="1.0" encoding="UTF-8"?>' +
            etree.tostring(ADOCUMENT, encoding='utf-8').decode("utf-8"))
605e7f711f34661daae6869419d6f8bebb05a2c4
3,649,621
def query_for_build_status(service, branch, target, starting_build_id):
    """Query Android Build Service for the status of the 4 builds in the
    target branch whose build IDs are >= the provided build ID"""
    try:
        # Python 3 print; the original Python 2 form would break under py3
        print('Querying Android Build APIs for builds of {} on {} starting at'
              ' buildID {}'.format(target, branch, starting_build_id))
        return service.build().list(buildType='submitted',
                                    branch=branch,
                                    target=target,
                                    maxResults='4',
                                    startBuildId=starting_build_id).execute()
    except errors.HttpError as error:
        print('HTTP Error while attempting to query the build status.')
        print(error)
        return None
4e1e04dae1ce13217374207a1b57d7380552dfc5
3,649,623
def create_pool(
    dsn=None,
    *,
    min_size=10,
    max_size=10,
    max_queries=50000,
    max_inactive_connection_lifetime=300.0,
    setup=None,
    init=None,
    loop=None,
    authenticator=None,
    **connect_kwargs,
):
    """Create an Asyncpg connection pool through Approzium authentication.

    Takes the same arguments as ``asyncpg.create_pool`` in addition to the
    `authenticator` argument.

    :return: An instance of :class:`~approzium.asyncpg.pool._ApproziumPool`.

    Example:

    .. code-block:: python

        >>> import approzium
        >>> from approzium.asyncpg import create_pool
        >>> auth = approzium.AuthClient("myauthenticator.com:6001", disable_tls=True)
        >>> pool = await create_pool(user='postgres', authenticator=auth)
        >>> con = await pool.acquire()
        >>> try:
        ...     await con.fetch('SELECT 1')
        ... finally:
        ...     await pool.release(con)
    """
    return _ApproziumPool(
        dsn,
        connection_class=Connection,
        min_size=min_size,
        max_size=max_size,
        max_queries=max_queries,
        loop=loop,
        setup=setup,
        init=init,
        max_inactive_connection_lifetime=max_inactive_connection_lifetime,
        authenticator=authenticator,
        **connect_kwargs,
    )
0b50a4cba07fb4797e04cc384dd46d1e21deed12
3,649,624
import logging


def _get_all_schedule_profile_entries_v1(profile_name, **kwargs):
    """
    Perform a GET call to get all entries of a QoS schedule profile

    :param profile_name: Alphanumeric name of the schedule profile
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: Dictionary containing schedule profile entry URIs
    """
    target_url = kwargs["url"] + "system/qos/%s/queues" % profile_name

    response = kwargs["s"].get(target_url, verify=False)

    if not common_ops._response_ok(response, "GET"):
        logging.warning("FAIL: Getting dictionary of URIs of entries in QoS "
                        "schedule profile '%s' failed with status code %d: %s"
                        % (profile_name, response.status_code, response.text))
    else:
        logging.info("SUCCESS: Getting dictionary of URIs of entries in QoS "
                     "schedule profile '%s' succeeded" % profile_name)

    schedule_profile_entries = response.json()

    # For some reason, this API returns a list when empty and a dictionary
    # when there is data; make this function always return a dictionary.
    if not schedule_profile_entries:
        return {}
    else:
        return schedule_profile_entries
32d6278ce6704feb5831012c2d0050b226fc7dfa
3,649,625
def loadSource(path):
    """Loads a list of transportReactions.

    Format:
        R("Macgamb_Transp")
        R("Madnb_Transp")
        R("MalaDb_Transp")...
    """
    with open(path, 'r') as file:
        sources = [line.strip() for line in file]
    return sources
244e9e5619a5039822ef14dfbb3d99b55cb6cc74
3,649,626
from typing import Optional
import struct


def frombin(
    __data: Bitcode,
    __dtype: SupportedDataType | bytes,
    num: int = 1,
    *,
    encoding: Optional[str] = None,
    signed: bool = True,
) -> ValidDataset:
    """converts a string of 0 and 1 back into the original data

    Args:
        data (BinaryCode): a string of 0 and 1
        dtype (Union[int, float, str]): the desired data type to convert to

    Raises:
        TypeError: if the desired datatype is not of the integer, float or
            string data types

    Returns:
        Union[int, float, str]: converted data
    """
    if __dtype is int:
        stop = len(__data)
        step = stop // num
        if signed:
            decoded_data = [None] * num
            for index, i in enumerate(range(0, stop, step)):
                bindata = __data[i : i + step]
                decoded_data[index] = int(
                    "-%s" % (bindata) if bindata[0] == "1" else bindata, 2
                )
        else:
            decoded_data = [int(__data[i : i + step], 2) for i in range(0, stop, step)]
        return decoded_data if num != 1 else decoded_data[0]

    bytedata = int(__data, 2).to_bytes((len(__data) + 7) // 8, config.ENDIAN)

    if __dtype in ("s", str):
        return "".join(bytes.decode(bytedata, encoding or config.DEFAULT_STR_FORMAT))
    else:
        try:
            decoded_data = list(
                struct.unpack(
                    "%s%s%s" % (">" if config.ENDIAN == "big" else "<", num, __dtype),
                    bytedata,
                )
            )
            return decoded_data if num != 1 else decoded_data[0]
        except struct.error:
            raise TypeError(f"cannot convert byte data to '{__dtype}'")
6fa7219ea8622071c7bb3277c8b59717543e9286
3,649,627
def check_size():
    """Assumes the problem size has been set by set_size before some operation.
    This checks if the size was changed.
    Size is defined as (PIs, POs, ANDs, FF, max_bmc).
    Returns TRUE if the size is the same."""
    global npi, npo, nands, nff, nmd
    #print n_pis(),n_pos(),n_ands(),n_latches()
    result = ((npi == n_pis()) and (npo == n_pos()) and
              (nands == n_ands()) and (nff == n_latches()))
    return result
361edb3b4f20a3ae4920c784ad2d1c56fe35e2d6
3,649,628
def vrms2dbm(vp):
    """
    Converts a scalar or a numpy array from volts RMS to dBm,
    assuming an impedance of 50 Ohm.

    Arguments:
        - vp: scalar or numpy array containing values in volts RMS
          to be converted to dBm

    Returns:
        - scalar or numpy array containing the result
    """
    return 10. * np.log10(20. * (vp) ** 2.)
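As a sanity check on the 50 Ohm assumption stated in the docstring, a short worked example (numpy assumed imported as np, as in the snippet):

import numpy as np

# P = Vrms^2 / 50 Ohm; for 1 Vrms that is 0.02 W = 20 mW, i.e. 10*log10(20) dBm
print(vrms2dbm(1.0))                   # ~13.0103
print(vrms2dbm(np.array([0.1, 1.0])))  # ~[-6.9897, 13.0103]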
7d0f76ab74cf82d2d56f97840153f1b9bc3cb8a8
3,649,629
def aa_i2c_read(aardvark, slave_addr, flags, data_in):
    """usage: (int return, u08[] data_in) = aa_i2c_read(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_in)

    All arrays can be passed into the API as an ArrayType object or as
    a tuple (array, length), where array is an ArrayType object and
    length is an integer.  The user-specified length would then serve
    as the length argument to the API function (please refer to the
    product datasheet).  If only the array is provided, the array's
    intrinsic length is used as the argument to the underlying API
    function.

    Additionally, for arrays that are filled by the API function, an
    integer can be passed in place of the array argument and the API
    will automatically create an array of that length.  All output
    arrays, whether passed in or generated, are passed back in the
    returned tuple."""
    if not AA_LIBRARY_LOADED:
        return AA_INCOMPATIBLE_LIBRARY
    # data_in pre-processing
    __data_in = isinstance(data_in, int)
    if __data_in:
        (data_in, num_bytes) = (array_u08(data_in), data_in)
    else:
        (data_in, num_bytes) = (isinstance(data_in, ArrayType)
                                and (data_in, len(data_in))
                                or (data_in[0], min(len(data_in[0]), int(data_in[1]))))
        if data_in.typecode != 'B':
            raise TypeError("type for 'data_in' must be array('B')")
    # Call API function
    (_ret_) = api.py_aa_i2c_read(aardvark, slave_addr, flags, num_bytes, data_in)
    # data_in post-processing
    if __data_in:
        del data_in[max(0, min(_ret_, len(data_in))):]
    return (_ret_, data_in)
59cca99e3ae811e957f9dd053205f3639c1451a4
3,649,630
def urbandictionary_search(search):
    """
    Searches urbandictionary's API for a given search term.

    :param search: The search term str to search for.
    :return: definition str or None on no match or error.
    """
    if str(search).strip():
        urban_api_url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
        response = util.web.http_get(url=urban_api_url, json=True)
        if response['json'] is not None:
            try:
                definition = response['json']['list'][0]['definition']
                return definition.encode('ascii', 'ignore')
            except (KeyError, IndexError):
                return None
        else:
            return None
3cd63486adc11f3ca20d4cd6216006d3f2d2239f
3,649,632
def Performance(ALGORITHM_CONFIG, CELLULAR_MODEL_CONFIG, alog_name):
    """
    Performance testing
    """
    # Server profile: num_ues=200, APs=16, Scale=200.0, explore_radius=1
    loadbalanceRL = interface.Rainman2(SETTINGS)
    loadbalanceRL.algorithm_config = ALGORITHM_CONFIG
    loadbalanceRL.environment_config = CELLULAR_MODEL_CONFIG
    if alog_name == 'linear':
        result_linear = loadbalanceRL.run_experiment(
            'Cellular', 'Qlearning', 'LinearRegression')
        return result_linear
    if alog_name == 'Naive':
        result_Naive = loadbalanceRL.run_experiment(
            'Cellular', 'Qlearning', 'Naive')
        return result_Naive
    if alog_name == 'NN':
        result_NN = loadbalanceRL.run_experiment(
            'Cellular', 'Qlearning', 'NN')
        return result_NN
    if alog_name == 'DQN':
        result_DQN = loadbalanceRL.run_experiment(
            'Cellular', 'Qlearning', 'DQN')
        return result_DQN
87e5d6b0c400af0262b6a2c746e855b9b71a5c35
3,649,633
def launch(sid):
    """
    Launch a scan

    Launch the scan specified by the sid.
    """
    data = connect('POST', '/scans/{0}/launch'.format(sid))
    return data['scan_uuid']
fa99e7a50e9e2ddb30ba131ebd61c998c2cdabaa
3,649,634
import ast


def transpose_dict(data, data_key):
    """Function: transpose_dict

    Description: Transpose specified keys in a list of dictionaries
        to specified data types or None.

    Arguments:
        (input) data -> Initial list of dictionaries.
        (input) data_key -> Dictionary of keys and data types.
        (output) mod_data -> Modified list of dictionaries.
    """
    data = list(data)
    data_key = dict(data_key)
    mod_data = list()
    literal_list = ["bool", "list"]

    for list_item in data:
        list_item = dict(list_item)

        for item in set(list_item.keys()) & set(data_key.keys()):
            if not list_item[item] or list_item[item] == "None":
                list_item[item] = None
            elif data_key[item] == "int":
                list_item[item] = int(list_item[item])
            elif data_key[item] in literal_list:
                list_item[item] = ast.literal_eval(list_item[item])

        mod_data.append(list_item)

    return mod_data
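A minimal sketch of the behavior above, using hypothetical input rows:

rows = [{"count": "3", "active": "True", "note": "None"}]
keys = {"count": "int", "active": "bool", "note": "str"}
print(transpose_dict(rows, keys))
# [{'count': 3, 'active': True, 'note': None}]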
7675ea2f80e9e85993dc99a2a31df04abfeba2c8
3,649,635
def aligner_to_symbol(calls):
    """
    Assign symbols to the different aligners in the input file and set the
    .shape attribute on the class instances.

    Return the list of indices at which each aligner is first found,
    together with the aligner names, both sorted by aligner.
    """
    symbols = ['o', '+', 'x', 'v', '*', 'D', 's', 'p', '8', 'X']
    aligners = sorted(set([c.aligner for c in calls]), reverse=True)
    aligner_to_symbol_dict = {a: s for a, s in zip(aligners, symbols)}
    for c in calls:
        c.shape = aligner_to_symbol_dict[c.aligner]
    index_and_aligners = zip([[c.aligner for c in calls].index(i) for i in aligners],
                             aligners)
    return zip(*sorted(index_and_aligners, key=lambda x: x[1]))
b9cef3ae33b6ce84daf78a8bc8ce528f97d7a8a6
3,649,636
def nfvi_create_subnet(network_uuid, subnet_name, ip_version, subnet_ip,
                       subnet_prefix, gateway_ip, dhcp_enabled, callback):
    """
    Create a subnet
    """
    cmd_id = _network_plugin.invoke_plugin('create_subnet', network_uuid,
                                           subnet_name, ip_version,
                                           subnet_ip, subnet_prefix,
                                           gateway_ip, dhcp_enabled,
                                           callback=callback)
    return cmd_id
383a0ffeb6e364f761c8d4038bf8e53f367021c1
3,649,638
def convertCRS(powerplants, substations, towers, crs, grid):
    """
    :param powerplants:
    :param substations:
    :param towers:
    :param crs:
    :return:
    """
    # to_crs returns a new GeoDataFrame, so the result must be assigned
    substations = substations.to_crs(crs)
    # powerplants = powerplants.set_crs(crs)
    # powerplants = powerplants.to_crs(crs)
    # print(powerplants.crs)
    towers = towers.to_crs(crs)
    return (substations, powerplants, towers, grid)
9fcb8c51323c00935ba2c882502a273f2bf532ff
3,649,639
def get_pathway(page_name, end_pg, max_len, trail, paths):
    """
    Finds a list of all paths from a starting wikipedia page to an end page

    Assumes page_name is a valid wikipedia article title and end_pg is a
    valid Wikipedia Page Object

    Args:
        page_name: (Str) The name of the current article
        end_pg: (Wikipedia Page) The page the path should end at
        max_len: (Int) The maximum number of steps between the start page
            and the end page
        trail: (List) The current path being searched
        paths: (List) A set of all the paths between the starting page and
            the end page

    Returns nothing but appends to the given list of paths
    """
    trail.append(page_name)  # add the current page to the current trail
    # Check if the page has the end page as a link and
    # add it to the list of paths
    if h.has_end(page_name, end_pg):
        # if the page contains a link to the end page,
        # add the end page to the trail, and add the trail to the paths list
        trail.append(end_pg.title)
        paths.append(trail)
        print(f"**Pathway {len(paths)}**: {h.plot_path(trail)}")
        return None
    # if the trail is above the maximum length return none
    elif max_len <= 1:
        print(f"Not a path: {trail}")
        return None
    else:
        # Check each of the links in the page
        # Continue branching looking for the end
        for link in h.get_links(page_name):
            if link not in trail:
                if h.is_page(link):
                    get_pathway(link, end_pg, max_len - 1, trail[:], paths)
3b8effcb1f5295a854d32cc6438093f5ba7c1fa4
3,649,640
def clip_to_ndc(point_clip_space, name="clip_to_ndc"):
    """Transforms points from clip to normalized device coordinates (ndc).

    Note:
        In the following, A1 to An are optional batch dimensions.

    Args:
        point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last
            dimension represents points in clip space.
        name: A name for this op. Defaults to "clip_to_ndc".

    Raises:
        ValueError: If `point_clip_space` is not of size 4 in its last
            dimension.

    Returns:
        A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in
        normalized device coordinates.
    """
    with tf.name_scope(name):
        point_clip_space = tf.convert_to_tensor(value=point_clip_space)
        shape.check_static(
            tensor=point_clip_space,
            tensor_name="point_clip_space",
            has_dim_equals=(-1, 4))
        w = point_clip_space[..., -1:]
        return point_clip_space[..., :3] / w
ee49d891da941b6da48797035c5b976f5d10762d
3,649,641
def read_number(dtype, prompt='', floor=None, ceil=None, repeat=False):
    """ Reads a number within specified bounds. """
    while True:
        try:
            result = dtype(input(prompt))
            if floor is not None and result < floor:
                raise ValueError(f'Number must be no less than {floor}.')
            if ceil is not None and result > ceil:
                raise ValueError(f'Number must be no greater than {ceil}.')
        except ValueError as e:
            print(e)
            result = None
        if result is not None or not repeat:
            return result
a528b1f5912ba4bab0b87c87004311778eaa8187
3,649,643
from typing import Optional


def dem_adjust(
    da_elevtn: xr.DataArray,
    da_flwdir: xr.DataArray,
    da_rivmsk: Optional[xr.DataArray] = None,
    flwdir: Optional[pyflwdir.FlwdirRaster] = None,
    connectivity: int = 4,
    river_d8: bool = False,
    logger=logger,
) -> xr.DataArray:
    """Returns hydrologically conditioned elevation.

    The elevation is conditioned to D4 (`connectivity=4`) or D8
    (`connectivity=8`) flow directions based on the algorithm described in
    Yamazaki et al. [1]_

    The method assumes the original flow directions are in D8. Therefore, if
    `connectivity=4`, an intermediate D4 conditioned elevation raster is
    derived first, based on which new D4 flow directions are obtained and
    used to condition the original elevation.

    Parameters
    ----------
    da_elevtn, da_flwdir, da_rivmsk : xr.DataArray
        elevation [m+REF]
        D8 flow directions [-]
        binary river mask [-], optional
    flwdir : pyflwdir.FlwdirRaster, optional
        D8 flow direction raster object. If None it is derived on the fly
        from `da_flwdir`.
    connectivity: {4, 8}
        D4 or D8 flow connectivity.
    river_d8 : bool
        If True and `connectivity==4`, additionally condition river cells
        to D8. Requires `da_rivmsk`.

    Returns
    -------
    xr.Dataset
        Dataset with hydrologically adjusted elevation ('elevtn') [m+REF]

    References
    ----------
    .. [1] Yamazaki et al. (2012). Adjustment of a spaceborne DEM for use in
       floodplain hydrodynamic modeling. Journal of Hydrology, 436-437, 81-91.
       https://doi.org/10.1016/j.jhydrol.2012.02.045

    See Also
    --------
    pyflwdir.FlwdirRaster.dem_adjust
    pyflwdir.FlwdirRaster.dem_dig_d4
    """
    # get flow directions for entire domain and for rivers
    if flwdir is None:
        flwdir = flwdir_from_da(da_flwdir, mask=False)
    if connectivity == 4 and river_d8 and da_rivmsk is None:
        raise ValueError('Provide "da_rivmsk" in combination with "river_d8"')
    elevtn = da_elevtn.values
    nodata = da_elevtn.raster.nodata
    logger.info(f"Condition elevation to D{connectivity} flow directions.")
    # get D8 conditioned elevation
    elevtn = flwdir.dem_adjust(elevtn)
    # get D4 conditioned elevation (based on D8 conditioned!)
    if connectivity == 4:
        rivmsk = da_rivmsk.values == 1 if da_rivmsk is not None else None
        # derive D4 flow directions with forced pits at original locations
        d4 = pyflwdir.dem.fill_depressions(
            elevtn=flwdir.dem_dig_d4(elevtn, rivmsk=rivmsk, nodata=nodata),
            nodata=nodata,
            connectivity=connectivity,
            idxs_pit=flwdir.idxs_pit,
        )[1]
        # condition the DEM to the new D4 flow dirs
        flwdir_d4 = pyflwdir.from_array(
            d4, ftype="d8", transform=flwdir.transform, latlon=flwdir.latlon
        )
        elevtn = flwdir_d4.dem_adjust(elevtn)
        # condition river cells to D8
        if river_d8:
            flwdir_river = flwdir_from_da(da_flwdir, mask=rivmsk)
            elevtn = flwdir_river.dem_adjust(elevtn)
        # assert np.all((elv2 - flwdir_d4.downstream(elv2))>=0)
    # save to dataarray
    da_out = xr.DataArray(
        data=elevtn,
        coords=da_elevtn.raster.coords,
        dims=da_elevtn.raster.dims,
    )
    da_out.raster.set_nodata(nodata)
    da_out.raster.set_crs(da_elevtn.raster.crs)
    return da_out
d59f5bae1df44cc84c4eb98d8dd14ca923dc4809
3,649,644
from copy import copy
from itertools import product

from numpy import zeros, unique


def trainModel(label, bestModel, obs, trainSet, testSet, modelgrid, cv, optMetric='auc'):
    """ Train a message classification model """
    pred = zeros(len(obs))
    fullpred = zeros((len(obs), len(unique(obs))))
    model = copy(bestModel.model)
    # find the best model via tuning grid
    for tune in [dict(list(zip(modelgrid, v))) for v in product(*list(modelgrid.values()))]:
        for k in list(tune.keys()):
            setattr(model, k, tune[k])
        i = 0
        for tr, vl in cv:
            # .iloc replaces the long-removed DataFrame.ix for positional indexing
            model.fit(trainSet.iloc[tr].values, obs[tr])
            pred[vl] = model.predict_proba(trainSet.iloc[vl].values)[:, 1]
            fullpred[vl, :] = model.predict_proba(trainSet.iloc[vl].values)
            i += 1
        bestModel.updateModel(pred, fullpred, obs, model, trainSet.columns.values,
                              tune, optMetric=optMetric)
    # re-train with all training data
    bestModel.model.fit(trainSet.values, obs)
    print(bestModel)
    return {label: {'pred': pred,
                    'test_pred': bestModel.model.predict_proba(testSet)[:, 1]}}
fdf60d23894bfd997cdf7fa82cb59257ad7b2954
3,649,645
def vm_deploy(vm, force_stop=False):
    """
    Internal API call used for finishing VM deploy;
    Actually cleaning the json and starting the VM.
    """
    if force_stop:  # VM is running without OS -> stop
        cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
    else:  # VM is stopped and deployed -> start
        cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)

    msg = 'Deploy server'
    lock = 'vmadm deploy ' + vm.uuid
    meta = {
        'output': {
            'returncode': 'returncode',
            'stderr': 'message',
            'stdout': 'json'
        },
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid
    }
    callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})

    return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock,
                   callback=callback, queue=vm.node.fast_queue, nolog=True,
                   ping_worker=False, check_user_tasks=False)
324dffa2a181d4b796a8f263eeb57d1452826c78
3,649,646
def get_monitor_value(image, monitor_key):
    """Return the monitor value from an image using a header key.

    :param fabio.fabioimage.FabioImage image: Image containing the header
    :param str monitor_key: Key containing the monitor
    :return: returns the monitor else returns 1.0
    :rtype: float
    """
    if monitor_key is None or monitor_key == "":
        return 1.0
    try:
        monitor = header_utils.get_monitor_value(image, monitor_key)
        return monitor
    except header_utils.MonitorNotFound:
        logger.warning("Monitor %s not found. No normalization applied.",
                       monitor_key)
        return 1.0
    except Exception as e:
        logger.warning("Failed to load monitor. No normalization applied. %s",
                       str(e))
        return 1.0
cf74ab608837b6f5732a70d997afa1fe424b2ee1
3,649,649
def default_thread_index(value, threads):
    """
    Find the index of value in the threads array.

    :param value:
    :param threads:
    :return:
    """
    value_index = threads.index(value)
    return value_index
7be2efb6579f2880f53dac11705ba6a068c2d92d
3,649,651
import requests


def new_things(url):
    """Attempts to register new things on the directory

    Takes 1 argument:
    url - URL containing thing descriptions to register
    """
    response = requests.post(
        '{}/things/register_url'.format(settings.THING_DIRECTORY_HOST),
        headers={
            'Authorization': settings.THING_DIRECTORY_KEY,
        },
        json={'url': url})
    response.raise_for_status()
    return response.json()['uuids']
0336d094e9581f3382dd33ac8a9bf8fd43754d82
3,649,652
def isID(value):
    """Checks if value looks like a Ulysses ID, i.e. is 22 chars long.

    Not an exact science, but good enough to prevent most mistakes.
    """
    return len(value) == 22
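A quick check of the length heuristic:

print(isID("x" * 22))  # True
print(isID("hello"))   # False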
527db9446adc2b88c2117bd35c74474c3e7bad24
3,649,653
def tool_on_path(tool: str) -> str:
    """
    Helper function to determine if a given tool is on the user's PATH
    variable. Wraps around runspv.tool_on_path().

    :param tool: the tool's filename to look for.
    :return: the path of the tool; raises ToolNotOnPathError if the tool
        isn't on the PATH.
    """
    return runspv.tool_on_path(tool)
52963a818bcea59eaaec1d20000d3a4a1296ee26
3,649,654
def DefineDecode(i, n, invert=False):
    """
    Decode the n-bit number i.

    @return: 1 if the n-bit input equals i
    """
    class _Decode(Circuit):
        name = 'Decode_{}_{}'.format(i, n)
        IO = ['I', In(Bits[n]), 'O', Out(Bit)]

        @classmethod
        def definition(io):
            if n <= 8:
                j = 1 << i
                if invert:
                    m = 1 << n
                    mask = (1 << m) - 1
                    j = mask & (~j)
                decode = ROMN(j, n)
            else:
                nluts = (n + 3) // 4
                data = nluts * [0]
                for j in range(nluts):
                    data[j] = (i >> 4 * j) & 0xf  # 4-bit pieces
                decode = FlatHalfCascade(n, 4, data, ZERO, 1)
            wire(io.I, decode.I)
            wire(decode.O, io.O)

    return _Decode
9be19b191a1048dffd8a6fe82caabdcb1dd33f42
3,649,655
def absent(name, database, **client_args):
    """
    Ensure that given continuous query is absent.

    name
        Name of the continuous query to remove.

    database
        Name of the database that the continuous query was defined on.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "continuous query {0} is not present".format(name),
    }

    if __salt__["influxdb.continuous_query_exists"](database, name, **client_args):
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = (
                "continuous query {0} is present and needs to be removed"
            ).format(name)
            return ret
        if __salt__["influxdb.drop_continuous_query"](database, name, **client_args):
            ret["comment"] = "continuous query {0} has been removed".format(name)
            ret["changes"][name] = "Absent"
            return ret
        else:
            ret["comment"] = "Failed to remove continuous query {0}".format(name)
            ret["result"] = False
            return ret

    return ret
f280dad71275cd576edbefac9376463a2ab91fc7
3,649,656
def get_ads(client, customer_id, new_ad_resource_names):
    """Retrieves a google.ads.google_ads.v4.types.AdGroupAd instance.

    Args:
        client: A google.ads.google_ads.client.GoogleAdsClient instance.
        customer_id: (str) Customer ID associated with the account.
        new_ad_resource_names: (str) Resource name associated with the Ad
            group.

    Returns:
        An instance of the google.ads.google_ads.v4.types.AdGroupAd message
        class of the newly created ad group ad.
    """
    def formatter(given_string):
        """Helper that quotes the names of resources so that the formatted
        string can be used within an IN clause.

        Args:
            given_string: (str) The string to be formatted.
        """
        results = []
        for i in given_string:
            results.append(repr(i))
        return ','.join(results)

    resource_names = formatter(new_ad_resource_names)
    ga_service = client.get_service('GoogleAdsService', version='v4')
    query = ('SELECT ad_group_ad.ad.id, '
             'ad_group_ad.ad.expanded_text_ad.headline_part1, '
             'ad_group_ad.ad.expanded_text_ad.headline_part2, '
             'ad_group_ad.status, ad_group_ad.ad.final_urls, '
             'ad_group_ad.resource_name '
             'FROM ad_group_ad '
             'WHERE ad_group_ad.resource_name in ({}) '.format(resource_names))
    response = ga_service.search(customer_id, query, PAGE_SIZE)
    response = iter(response)
    ads = []
    while response:
        try:
            current_row = next(response)
            ads.append(current_row.ad_group_ad)
        except StopIteration:
            break
    return ads
3e1bc99901490c53c66418a63238cf76de282896
3,649,657
def corrfact_vapor_rosolem(h, h_ref=None, const=0.0054):
    """Correction factor for vapor correction from absolute humidity (g/m3).

    The equation was suggested by Rosolem et al. (2013). If no reference
    value for absolute humidity ``h_ref`` is provided, the average value
    will be used.

    Parameters
    ----------
    h : float or array of floats
        Absolute humidity (g / m3)
    h_ref : float
        Reference value for absolute humidity
    const : float
        Empirical constant, defaults to 0.0054

    Returns
    -------
    output : float or array of floats
        Correction factor for water vapor effect (dimensionless)
    """
    if h_ref is None:
        h_ref = np.mean(h)
    return 1 + const * (h - h_ref)
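A short usage sketch with hypothetical humidity readings (numpy assumed imported as np, as in the snippet):

import numpy as np

h = np.array([5.0, 10.0, 15.0])
print(corrfact_vapor_rosolem(h))             # h_ref defaults to mean(h)=10 -> [0.973 1.    1.027]
print(corrfact_vapor_rosolem(h, h_ref=0.0))  # [1.027 1.054 1.081]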
6add20bf118e85e77f245776101169efb9ba4eac
3,649,658
def sine_ease_out(p):
    """Modeled after quarter-cycle of sine wave (different phase)"""
    # Assuming tau = 2*pi, a quarter cycle is tau / 4; the original
    # `sin(p * tau)` swept a full cycle, contradicting the docstring.
    return sin(p * tau / 4)
58a78ad44e04df42f0533b6a94e51d04398407a9
3,649,659
def _extract_codes_from_element_text(dataset, parent_el_xpath, condition=None):  # pylint: disable=invalid-name
    """Extract codes for checking from a Dataset. The codes are being
    extracted from element text.

    Args:
        dataset (iati.data.Dataset): The Dataset to check Codelist values
            within.
        parent_el_xpath (str): An XPath to locate the element(s) with the
            attribute of interest.
        condition (str): An optional XPath expression to limit the scope of
            what is extracted.

    Returns:
        list of tuple: A tuple in the format: `(str, int)` - The `str` is a
        matching code from within the Dataset; the `int` is the sourceline
        at which the parent element is located.
    """
    # include the condition
    if condition:
        parent_el_xpath = parent_el_xpath + '[' + condition + ']'

    parents_to_check = dataset.xml_tree.xpath(parent_el_xpath)

    located_codes = list()
    for parent in parents_to_check:
        located_codes.append((parent.text, parent.sourceline))

    return located_codes
45e4ec2a61dc38066ad9a71d41e63a48c6ccde23
3,649,660
def rotate_im(img, angle, interpolation=cv2.INTER_LINEAR,
              border_mode=cv2.BORDER_REFLECT_101, value=None):
    """Rotate the image.

    Rotate the image such that the rotated image is enclosed inside the
    tightest rectangle. The area not occupied by the pixels of the original
    image is filled according to `border_mode`.

    Parameters
    ----------
    img : numpy.ndarray
        numpy image
    angle : float
        angle by which the image is to be rotated

    Returns
    -------
    numpy.ndarray
        Rotated Image
    """
    # grab the dimensions of the image and then determine the centre
    (h, w) = img.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    warp_fn = _maybe_process_in_chunks(
        cv2.warpAffine, M=M, dsize=(nW, nH), flags=interpolation,
        borderMode=border_mode, borderValue=value
    )
    return warp_fn(img)
40ab5d9761bdb2044fe99af4d5a51187edd34327
3,649,661
def list_modules(curdir=CURDIR, pattern=MOD_FILENAME_RE):
    """List names from {ok,ng}*.py.
    """
    return sorted(
        m.name.replace('.py', '')
        for m in curdir.glob('*.py') if pattern.match(m.name)
    )
249b276ec5f42534a4ad162c02110bcf1f9cadf0
3,649,662
def encode_set_validator_config_and_reconfigure_script(
    validator_account: AccountAddress,
    consensus_pubkey: bytes,
    validator_network_addresses: bytes,
    fullnode_network_addresses: bytes,
) -> Script:
    """# Summary
    Updates a validator's configuration, and triggers a reconfiguration of
    the system to update the validator set with this new validator
    configuration. Can only be successfully sent by a Validator Operator
    account that is already registered with a validator.

    # Technical Description
    This updates the fields with corresponding names held in the
    `ValidatorConfig::ValidatorConfig` config resource held under
    `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to
    trigger a reconfiguration of the system. This reconfiguration will
    update the validator set on-chain with the updated
    `ValidatorConfig::ValidatorConfig`.

    # Parameters
    | Name                          | Type         | Description |
    | ------                        | ------       | ------------- |
    | `validator_operator_account`  | `&signer`    | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. |
    | `validator_account`           | `address`    | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. |
    | `consensus_pubkey`            | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. |
    | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. |
    | `fullnode_network_addresses`  | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. |

    # Common Abort Conditions
    | Error Category             | Error Reason                                   | Description |
    | ----------------           | --------------                                 | ------------- |
    | `Errors::NOT_PUBLISHED`    | `ValidatorConfig::EVALIDATOR_CONFIG`           | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. |
    | `Errors::REQUIRES_ROLE`    | `Roles::EVALIDATOR_OPERATOR`                   | `validator_operator_account` does not have a Validator Operator role. |
    | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. |
    | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY`      | `consensus_pubkey` is not a valid ed25519 public key. |
    | `Errors::INVALID_STATE`    | `DiemConfig::EINVALID_BLOCK_TIME`              | An invalid time value was encountered in reconfiguration. Unlikely to occur. |

    # Related Scripts
    * `Script::create_validator_account`
    * `Script::create_validator_operator_account`
    * `Script::add_validator_and_reconfigure`
    * `Script::remove_validator_and_reconfigure`
    * `Script::set_validator_operator`
    * `Script::set_validator_operator_with_nonce_admin`
    * `Script::register_validator_config`
    """
    return Script(
        code=SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE,
        ty_args=[],
        args=[
            TransactionArgument__Address(value=validator_account),
            TransactionArgument__U8Vector(value=consensus_pubkey),
            TransactionArgument__U8Vector(value=validator_network_addresses),
            TransactionArgument__U8Vector(value=fullnode_network_addresses),
        ],
    )
8b5e5d259750eecf3cea78e9abba82300baa2626
3,649,663
def _do_ecf_reference_data_import(
    import_method,
    widget,
    logwidget=None,
    specification_items=None,
    ecfdate=None,
    datecontrol=None,
):
    """Import a new ECF club file.

    widget - the manager object for the ecf data import tab
    """
    ecffile = widget.datagrid.get_data_source().dbhome

    # The commented code fails if tkinter is compiled without --enable-threads
    # as in OpenBSD 5.7 i386 packages. The standard build from FreeBSD ports
    # until early 2015 at least, when this change was introduced, is compiled
    # with --enable-threads so the commented code worked. Not sure if the
    # change in compiler on FreeBSD from gcc to clang made a difference. The
    # Microsoft Windows' Pythons seem to be compiled with --enable-threads
    # because the commented code works in that environment. The situation on
    # OS X, and any GNU-Linux distribution, is not known.
    # Comparison with the validate_and_copy_ecf_ogd_players_post_2006_rules()
    # method in the sibling module sqlite3ecfogddataimport, which worked on
    # OpenBSD 5.7 as it stood, highlighted the changes needed.
    # ecfdate = widget.get_ecf_date()

    if not ecffile:
        return False
    if not ecfdate:
        return False
    results = widget.get_appsys().get_results_database()
    if not results:
        return False
    results.do_database_task(
        import_method,
        logwidget=logwidget,
        taskmethodargs=dict(
            ecffile=ecffile,
            ecfdate=ecfdate,
            parent=widget.get_widget(),
            # datecontrol=widget.ecfdatecontrol.get(),
            datecontrol=datecontrol,  # See --enable-threads comment just above.
        ),
        use_specification_items=specification_items,
    )
    return True
593b1ac77688c92c9fcd3ea8fafb3f5089849293
3,649,664
import ast
import inspect


def ast_operators(node):
    """Return a set of all operators and calls in the given AST, or raise an
    error if any are invalid."""
    if isinstance(node, (ast.Name, ast.Constant)):
        return set()
    elif isinstance(node, ast.BinOp):
        return {type(node.op)} | ast_operators(node.left) | ast_operators(node.right)
    elif isinstance(node, ast.UnaryOp):
        return {type(node.op)} | ast_operators(node.operand)
    elif isinstance(node, ast.Call):
        if node.func.id not in METRIC_OPS:
            raise ValueError(f"Unknown fn `{node.func.id}` in metric equation.")
        # Make sure the number of args matches the fn signature
        fn_argspec = inspect.getfullargspec(METRIC_OPS[node.func.id])
        if (not node.args
                or (fn_argspec.varargs is None and fn_argspec.varkw is None
                    and len(node.args) != len(fn_argspec.args))):
            raise ValueError(f"Unexpected number of args to {node.func.id}")
        return {node.func.id}.union(*(ast_operators(arg) for arg in node.args))
    else:
        raise TypeError(node)
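A minimal sketch of how the validator above might be driven. `METRIC_OPS` is a module-level registry the function assumes but does not define; the entry below is hypothetical (a plain Python callable is used so that `inspect.getfullargspec` works, which it would not on a C builtin like `math.log` directly):

import ast
import math

METRIC_OPS = {"log": lambda x: math.log(x)}  # hypothetical registry

tree = ast.parse("a + log(b)", mode="eval").body
print(ast_operators(tree))  # {<class 'ast.Add'>, 'log'} (set order varies)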
ce5c69e228fbab682cd41330a058b6f16b8d5d1a
3,649,665
def calibrate_clock(out, tolerance=0.002, dcor=False):
    """\
    currently for F2xx only: recalculate the clock calibration values
    and write them to the flash.
    """
    device = get_msp430_type() >> 8
    variables = {}
    if device == 0xf2:
        # First read the segment from the device so that only the calibration
        # values are updated; any other data in SegmentA is not changed.
        segment_a = memory.Memory()
        segment_a.append(memory.Segment(0x10c0, jtag._parjtag.memread(0x10c0, 64)))
        # get the settings for all the frequencies
        for frequency in calibvalues_memory_map:
            measured_frequency, dco, bcs1 = clock.setDCO(
                frequency * (1 - tolerance),
                frequency * (1 + tolerance),
                maxrsel=15,
                dcor=dcor
            )
            variables['f%dMHz_dcoctl' % (frequency / 1e6)] = TYPE_8BIT, dco
            variables['f%dMHz_bcsctl1' % (frequency / 1e6)] = TYPE_8BIT, bcs1
            out.write('BCS settings for %s: DCOCTL=0x%02x BCSCTL1=0x%02x\n' % (
                nice_frequency(measured_frequency), dco, bcs1))
            segment_a.setMem(calibvalues_memory_map[frequency]['DCO'], chr(dco))
            segment_a.setMem(calibvalues_memory_map[frequency]['BCS1'], chr(bcs1))
        # erase segment and write new values
        jtag._parjtag.memerase(jtag.ERASE_SEGMENT, segment_a[0].startaddress)
        jtag._parjtag.memwrite(segment_a[0].startaddress, segment_a[0].data)
    else:
        raise NotImplementedError("--calibrate is not supported on %Xxx" % device)
    return variables
6ad9940a0b43aff54317ff0b054a5a8e84fa5f73
3,649,666
def get_rejection_listings(username):
    """
    Get Rejection Listings for a user

    Args:
        username (str): username for user
    """
    activities = models.ListingActivity.objects.for_user(username).filter(
        action=models.ListingActivity.REJECTED)
    return activities
47f7078f193de651f282d1823900cd876bf9fd93
3,649,667
def quadratic_weighted_kappa(y_true, y_pred):
    """
    QWK (Quadratic Weighted Kappa) Score

    Args:
        y_true: target array.
        y_pred: predict array. Must be in a discrete format.

    Returns:
        QWK score
    """
    return cohen_kappa_score(y_true, y_pred, weights='quadratic')
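A small worked example, assuming `cohen_kappa_score` is scikit-learn's implementation (the snippet does not show the import):

from sklearn.metrics import cohen_kappa_score

y_true = [0, 1, 2, 2]
y_pred = [0, 1, 1, 2]
print(quadratic_weighted_kappa(y_true, y_pred))  # 0.8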
fe3208d58cfbed7fdc51ee6069bb4d72584ea6d7
3,649,668
def statistika():
    """Update the data and redirect to statistika.html"""
    check_user_id()
    data_manager.load_data_from_file()
    data_manager.data_for_stats()
    return bottle.template("statistika.html", data_manager=data_manager)
afc72610e4ca245089b131d06dfb5ed8a172615c
3,649,669
def decrement(x):
    """Given a number x, returns x - 1 unless that would be less than
    zero, in which case returns 0."""
    x -= 1
    if x < 0:
        return 0
    else:
        return x
56b95324c147a163d3bdd0e9f65782095b0a4def
3,649,670
def get_dagmaf(maf: msa.Maf) -> DAGMaf.DAGMaf:
    """Converts MAF to DagMaf.

    Args:
        maf: MAF to be converted.

    Returns:
        DagMaf built from the MAF.
    """
    sorted_blocks = sort_mafblocks(maf.filecontent)
    dagmafnodes = [
        DAGMaf.DAGMafNode(block_id=b.id,
                          alignment=b.alignment,
                          orient=b.orient,
                          order=b.order(),
                          out_edges=b.out_edges)
        for b in sorted_blocks
    ]
    return DAGMaf.DAGMaf(dagmafnodes)
40fd06a9429874f1ca7188f2ff185c4dd8b64e01
3,649,671
def optdat10(area, lpdva, ndvab, nglb):
    """Provides data for the optimization"""
    # Objective function type: tpobj == 1 --- Weight
    #                          tpobj == 2 --- Energy
    #                          tpobj == 3 --- Maximum stress
    #                          tpobj == 4 --- Maximum displacement
    tpobj = 1
    #
    # Constraint function type: tpres == 1 --- Weight
    #                           tpres == 2 --- Stress
    #                           tpres == 3 --- Stress and displacement
    #                           tpres == 4 --- Displacement
    #                           tpres == 5 --- Energy
    tpres = 2
    #
    # Set the limit values of the design variables
    # vlb --- lower limits
    # vub --- upper limits
    # x0  --- initial value
    xpdva = np.zeros(ndvab)
    for idvab in range(ndvab):
        iel = lpdva[idvab]
        xpdva[idvab] = area[iel]
    x0 = xpdva
    vlb = 0.1 * np.ones(ndvab)
    vub = 10 * np.ones(ndvab)
    #
    # Set the limit values of the constraints
    # clb --- lower limits
    # cub --- upper limits
    cones = np.ones(len(area))  # related to the number of elements
    cones2 = np.ones(nglb)      # related to the number of degrees of freedom
    clb1 = -250 * cones
    cub1 = 250 * cones
    # clb1 = -20*cones
    # cub1 = 20*cones
    # dlb1 = -0.4*cones2
    # dub1 = 0.4*cones2
    clbv = 1.5e+06 - 2.2204e-16  # 0
    cubv = 1.5e+06
    clbd = -1 * (10 ** -3) * cones2
    cubd = 1 * (10 ** -3) * cones2
    elbv = 2e-2
    eubv = 2e-2
    if tpres == 1:    # VOLUME
        cub = cubv
        clb = clbv
    elif tpres == 2:  # STRESSES
        clb = clb1
        cub = cub1
    elif tpres == 3:  # STRESSES and DISPLACEMENTS
        clb = [clb1, clbd]
        cub = [cub1, cubd]
    elif tpres == 4:  # DISPLACEMENTS
        clb = clbd
        cub = cubd
    else:             # ENERGY
        clb = elbv
        cub = eubv

    dadosoptdat10 = [tpobj, tpres, vlb, vub, x0, clb, cub]
    return dadosoptdat10
064813cb2e66adfed6cb5e694614b88343a7613c
3,649,672
def rotvec2quat(vec):
    """
    A rotation vector is a 3 dimensional vector which is co-directional to
    the axis of rotation and whose norm gives the angle of rotation
    (in radians).

    Args:
        vec (list or np.ndarray): a rotation vector. Its norm represents
            the angle of rotation.

    Returns:
        np.ndarray: quaternion [x,y,z,w] (shape: :math:`[4,]`).
    """
    r = R.from_rotvec(vec)
    return r.as_quat()
a19b7b67e9cd5877cc5045887d071e069892e0a6
3,649,673
def generate_pop(pop_size, length):
    """
    Initialize the population.

    :param pop_size: population size
    :param length: encoding length
    :return bin_population: binary-encoded population
    """
    decim_population = np.random.randint(0, 2**length - 1, pop_size)
    print(decim_population)
    bin_population = [('{:0%sb}' % length).format(x) for x in decim_population]
    return bin_population
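A usage sketch (numpy assumed imported as np, as in the snippet); the exact strings are random, and the function also prints the intermediate decimal array as a side effect:

pop = generate_pop(pop_size=4, length=8)
print(pop)  # e.g. ['01101001', '11100001', '00011010', '10110100']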
d1248fe59161d2a75eaf08ffe2b180537c2d1af5
3,649,674
def CountClusterSizes(clusterLabels):
    """
    This function takes the labels produced by spectral clustering (or
    another clustering algorithm) and counts the members in each cluster.
    This is primarily to see the distribution of cluster sizes over all
    windows, particularly to see if there are singleton clusters or a
    significant number of clusters with a small number of members.

    Parameters
    ----------
    clusterLabels: numpy array of int (clustered customers) - the cluster
        label of each customer

    Returns
    -------
    clusterCounts: numpy array of int (0,k) - the number of customers in
        each cluster
    """
    currentK = len(np.unique(clusterLabels))
    clusterCounts = np.zeros((1, currentK), dtype=int)
    for clustCtr in range(0, currentK):
        indices = np.where(clusterLabels == clustCtr)[0]
        clusterCounts[0, clustCtr] = len(indices)
    return clusterCounts
25bf78a83e55b72c7a33546450655efe7ee84874
3,649,676
def solver_problem1(digits_list):
    """Take the digit patterns and count how many are a 1, 4, 7 or 8,
    identified by their unique segment counts (2, 4, 3 and 7 segments
    respectively)."""
    cnt = 0
    for digits in digits_list:
        for d in digits:
            if len(d) in [2, 3, 4, 7]:
                cnt += 1
    return cnt
d1946d00d368ad498c9bb0a8562ec0ea76d26449
3,649,677
def spam_dotprods(rhoVecs, povms):
    """SPAM dot products (concatenates POVMs)"""
    nEVecs = sum(len(povm) for povm in povms)
    ret = _np.empty((len(rhoVecs), nEVecs), 'd')
    for i, rhoVec in enumerate(rhoVecs):
        j = 0
        for povm in povms:
            for EVec in povm.values():
                # todense() gives a 1D array, so no need to transpose EVec
                ret[i, j] = _np.vdot(EVec.todense(), rhoVec.todense())
                j += 1
    return ret
95adc6ea8e1d33899a7dc96ba99589ef9bffb7fe
3,649,678
def get_chi_atom_indices():
    """Returns atom indices needed to compute chi angles for all residue types.

    Returns:
        A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue
        types are in the order specified in rc.restypes + unknown residue
        type at the end. For chi angles which are not defined on the residue,
        the positions indices are by default set to 0.
    """
    chi_atom_indices = []
    for residue_name in rc.restypes:
        residue_name = rc.restype_1to3[residue_name]
        residue_chi_angles = rc.chi_angles_atoms[residue_name]
        atom_indices = []
        for chi_angle in residue_chi_angles:
            atom_indices.append([rc.atom_order[atom] for atom in chi_angle])
        for _ in range(4 - len(atom_indices)):
            atom_indices.append([0, 0, 0, 0])  # For chi angles not defined on the AA.
        chi_atom_indices.append(atom_indices)

    chi_atom_indices.append([[0, 0, 0, 0]] * 4)  # For UNKNOWN residue.

    return chi_atom_indices
5ac6f2208e2819b8e0d04329cbfb94cb5dcd26ba
3,649,680
def get_all_device_stats():
    """Obtain and return statistics for all attached devices."""
    devices = get_devices()
    stats = {}
    for serial in devices:
        model, device_stats = get_device_stats(serial)
        if not stats.get(model):
            stats[model] = {}
        stats[model][serial] = device_stats
    return stats
9f2a50c4f6008120bc9527260f501f7e261dd19f
3,649,681
def plot_coefs(coefficients, nclasses):
    """
    Plot the coefficients for each label

    coefficients: output from clf.coef_
    nclasses: total number of possible classes
    """
    scale = np.max(np.abs(coefficients))
    p = plt.figure(figsize=(25, 5))
    for i in range(nclasses):
        p = plt.subplot(1, nclasses, i + 1)
        p = plt.imshow(coefficients[i].reshape(28, 28),
                       cmap=plt.cm.RdBu, vmin=-scale, vmax=scale)
        p = plt.axis('off')
        p = plt.title('Class %i' % i)
    return None
356c6c4bb96b08a370b8c492275e638b059594e2
3,649,682
import datetime  # the module is needed: datetime.date.today() fails on the bare class


def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    desc['data'] = True
    desc['description'] = """This plot shows the number of days with a high
    temperature at or above a given threshold.  You can optionally generate
    this plot for the year to date period.
    """
    today = datetime.date.today()
    desc['arguments'] = [
        dict(type='station', name='station', default='IA2203',
             label='Select Station:', network='IACLIMATE'),
        dict(type="year", name="year", default=today.year,
             label="Year to Compare:"),
        dict(type='select', options=PDICT, default='full',
             label='Day Period Limit:', name='limit'),
    ]
    return desc
479d98e9ab19dcc03332c1a95ccc0624cdcfe24d
3,649,684
def calc_cost_of_buying(count, price):
    """Calculate the cost and fee required to buy shares."""
    subtotal = int(count * price)
    fee = calc_fee(subtotal)
    return subtotal + fee, fee
391909bbff35c6eb7d68c965e3f36317e4164b1a
3,649,685
from scipy import signal  # savgol_filter lives in scipy.signal, not the stdlib signal module
import numpy as np


def update_lr(it_lr, alg, test_losses, lr_info=None):
    """Update learning rate according to an algorithm."""
    if lr_info is None:
        lr_info = {}
    if alg == 'seung':
        threshold = 10
        if 'change' not in lr_info.keys():
            lr_info['change'] = 0
        if lr_info['change'] >= 4:
            return it_lr, lr_info
        # Smooth test_losses then check to see if they are still decreasing
        if len(test_losses) > threshold:
            smooth_test = signal.savgol_filter(np.asarray(test_losses), 3, 2)
            check_test = np.all(np.diff(smooth_test)[-threshold:] < 0)
            if check_test:
                it_lr = it_lr / 2.
                lr_info['change'] += 1
        return it_lr, lr_info
    elif alg is None or alg == '' or alg == 'none':
        return it_lr, lr_info
    else:
        raise NotImplementedError('No routine for: %s' % alg)
de6ef7d700a9c4b549b6d500f6737c84dc032c95
3,649,687
from functools import reduce
from math import sqrt  # needed by the range bound below


def factors(n):
    """ return set of divisors of a number """
    step = 2 if n % 2 else 1
    return set(reduce(list.__add__,
                      ([i, n // i] for i in range(1, int(sqrt(n)) + 1, step)
                       if n % i == 0)))
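Two quick checks of the divisor sets (note the odd-n fast path that steps by 2):

print(sorted(factors(36)))  # [1, 2, 3, 4, 6, 9, 12, 18, 36]
print(sorted(factors(15)))  # [1, 3, 5, 15]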
687608f5397181892aa338c96ee299f91d7b5431
3,649,689
import decimal


def round_decimal(x, digits=0):
    """This function returns the rounded-up float.

    Parameters
    ----------
    x : a float
    digits : decimal point

    Returns
    ----------
    Rounded up float
    """
    x = decimal.Decimal(str(x))
    if digits == 0:
        return int(x.quantize(decimal.Decimal("1"), rounding='ROUND_HALF_UP'))
    # both branches of the original `if digits > 1` built the same string
    string = '1e' + str(-1 * digits)
    return float(x.quantize(decimal.Decimal(string), rounding='ROUND_HALF_UP'))
8670fa1e9063376e012ebbc71df0a19c6205ea9c
3,649,690
def basic_gn_stem(model, data, **kwargs):
    """Add a basic ResNet stem (using GN)"""
    dim = 64
    p = model.ConvGN(
        data, 'conv1', 3, dim, 7, group_gn=get_group_gn(dim), pad=3, stride=2
    )
    p = model.Relu(p, p)
    p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride=2)
    return p, dim
7cd1c1e0ff58431fc89acdec0f6c1d5f6fa9daa8
3,649,691
def log_scale(start, end, num):
    """Simple wrapper to generate a list of numbers equally spaced in log space

    Parameters
    ----------
    start: float
        Initial number
    end: float
        Final number
    num: float
        Number of values in the list

    Returns
    -------
    list: 1d array
        List of numbers spanning start to end, equally spaced in log space
    """
    return np.logspace(np.log10(start), np.log10(end), num=num)
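A one-line check (numpy assumed imported as np, as in the snippet):

print(log_scale(1, 1000, 4))  # [   1.   10.  100. 1000.]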
32d3976cb9cbcceb4cef9af15da373ea84e4d0c7
3,649,692
def measure_xtran_params(neutral_point, transformation):
    """
    Description: Assume that the transformation from robot coord to camera
    coord is: RotX -> RotY -> RotZ -> Tranl.
    In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown.
    But we know the coords of a determined neutral point in the 2 coord
    systems, hence we can measure Tranl from robot centroid to camera
    centroid (Step 2).

    :param neutral_point  : Dict, list of 2 coords of neutral_point in 2 coord systems
    :param transformation : Dict, list of 3 rotating transformations
    :return: r2c_xtran    : Matrix 4x4 floats, transformation from robot coord to camera coord
    :return: c2r_xtran    : Matrix 4x4 floats, transformation from camera coord to robot coord
    # :return: tranl      : Matrix 4x4 floats, translation from robot coord to camera coord
    """
    # 1: Load coords of the neutral point
    neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord']))    # neutral point coord in robot coord system
    neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord']))  # neutral point coord in camera coord system
    rotx = create_rotx_matrix(theta=-transformation['rotx'])  # transformation matrix of rotation around x
    roty = create_roty_matrix(theta=-transformation['roty'])  # transformation matrix of rotation around y
    rotz = create_rotz_matrix(theta=-transformation['rotz'])  # transformation matrix of rotation around z

    # 2: Find transformation between robot coord centroid and camera coord centroid
    rotxyz = np.dot(np.dot(rotz, roty), rotx)  # transformation matrix after rotating sequentially around x, y, z
    neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3]  # coord of neutral point after RotXYZ
    Oc_in_3 = neutral_robot3 - neutral_camera  # coord of robot centroid in camera coord system
    tranl = create_tranl_matrix(vector=-Oc_in_3)

    # 3: Find transformation matrix from robot to camera
    # r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
    # c2r_xtran = np.linalg.inv(r2c_xtran)
    return rotx, roty, rotz, tranl
c2758158d545dbc6c2591f7f64f1df159a0c82db
3,649,693
def getPrefix(routetbl, peer_logical):
    """ FUNCTION TO GET THE PREFIX """
    for route in routetbl:
        if route.via == peer_logical:
            return route.name
2ca32a1fd63d6fcefbcc9ac23e8636c73e88455b
3,649,694
def Logger_log(level, msg):
    """
    Logger.log(level, msg)

    logs a message to the log.

    :param int level: the level to log at.
    :param str msg: the message to log.
    """
    return _roadrunner.Logger_log(level, msg)
af552b17aaeebef9713efffedcabd75946c961f1
3,649,695
import typing


def obj_test(**field_tests: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
    """Return a lambda that tests for dict with string keys and a particular
    type for each key"""
    def test(dat: typing.Any) -> bool:
        type_test(dict)(dat)
        dom_test = type_test(str)
        for dom, rng in dat.items():
            dom_test(dom)
            if dom not in field_tests:
                continue
            rng_test = field_tests[dom]
            rng_test(rng)
        missing = set(field_tests.keys()) - set(dat.keys())
        if missing:
            raise Exception(f"{dat!r} lacks fields {missing}")
        return True
    return test
0439821b634807e178539b0444b69305c15e2e4e
3,649,696
def hist2D(x, y, xbins, ybins, **kwargs):
    """ Create a 2 dimensional pdf via numpy histogram2d """
    H, xedg, yedg = np.histogram2d(x=x, y=y, bins=[xbins, ybins],
                                   density=True, **kwargs)
    xcen = (xedg[:-1] + xedg[1:]) / 2
    ycen = (yedg[:-1] + yedg[1:]) / 2
    return xcen, ycen, H
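A small shape check with hypothetical data (numpy assumed imported as np):

import numpy as np

x, y = np.random.randn(1000), np.random.randn(1000)
xc, yc, H = hist2D(x, y, xbins=20, ybins=20)
print(xc.shape, yc.shape, H.shape)  # (20,) (20,) (20, 20)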
7f192f4db38e954aad96abc66fa4dc9c190acd82
3,649,697
from collections import defaultdict  # needed for the ngram accumulator below


def generate_ngram_dict(filename, tuple_length):
    """Generate a dict with ngrams as key, following words as value

    :param filename: Filename to read from.
    :param tuple_length: The length of the ngram keys
    :return: Dict of the form {ngram: (next_words, ...), ... }
    """
    def file_words(file_pointer):
        """Generator for words in a file"""
        for line in file_pointer:
            for word in line.split():
                yield word

    ngrams = defaultdict(lambda: set())
    with open(filename, 'r') as fp:
        word_list = []
        for word in file_words(fp):
            if len(word_list) < tuple_length:
                word_list.append(word)
                continue
            ngrams[tuple(word_list)].add(word)
            word_list = word_list[1:] + [word]
    return {key: tuple(val) for key, val in ngrams.items()}
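A minimal usage sketch with a hypothetical corpus file:

# suppose 'corpus.txt' contains: the cat sat on the cat mat
ngrams = generate_ngram_dict('corpus.txt', 2)
print(ngrams[('the', 'cat')])  # ('sat', 'mat') -- order within the tuple may vary (built from a set)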
45f7eccae852e61f20044448955cade00174998c
3,649,698
def get_end_point(centerline, offset=0):
    """
    Get last point(s) of the centerline(s)

    Args:
        centerline (vtkPolyData): Centerline(s)
        offset (int): Number of points from the end point to be selected

    Returns:
        centerline_end_point (vtkPoint): Point corresponding to end of
            centerline.
    """
    centerline_end_points = []
    for i in range(centerline.GetNumberOfLines()):
        line = extract_single_line(centerline, i)
        centerline_end_points.append(line.GetPoint(line.GetNumberOfPoints() - 1 - offset))
    return centerline_end_points
f476e93b55bb046cfb6afb61a2e3ae37a172def3
3,649,699
def random_train_test_split(df, train_frac, random_seed=None):
    """
    This function randomizes the data based on the seed and then splits the
    dataframe into train and test sets, which are converted to their lists
    of vector representations.

    Args:
        df (Dataframe): The dataframe which is to be used to generate the
            train and test split.
        train_frac (int): The fraction in the range 0.0 to 1.0 that should
            be used for training.
        random_seed (int, optional): The seed for randomising. Defaults to
            None, which means a seed is chosen at random.

    Returns:
        tuple: The lists of lists representing the vectors in the train and
        test data frames respectively.
    """
    if random_seed is not None:
        df = df.sample(frac=1, random_state=random_seed)
    else:
        df = df.sample(frac=1)
    split_point = int(len(df.index) * train_frac)
    train = to_vector(df[:split_point])
    test = to_vector(df[split_point:])
    return train, test
c3c399792bdc1026d74f1ccc7241bdb327f307d3
3,649,700
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots,
                           UBmatrix=None, B0matrix=IDENTITYMATRIX,
                           offset=0, pureRotation=0, labXMAS=0, verbose=0,
                           pixelsize=0.079, dim=(2048, 2048),
                           kf_direction="Z>0"):
    """
    must: len(varying_parameter_values) = len(varying_parameter_indices)

    DATA_Q: array of all 3-element Miller indices
    nspots: indices of selected spots of DATA_Q
    UBmatrix:

    WARNING: all Miller indices must be entered in DATA_Q;
    selection is done in xy_from_Quat

    returns:
    """
    # selecting nspots of DATA_Q
    # print "DATA_Q in calc_XY_pixelpositions", DATA_Q
    # print "nspots", nspots
    # print "len(DATA_Q)", len(DATA_Q)
    DATAQ = np.take(DATA_Q, nspots, axis=0)
    trQ = np.transpose(DATAQ)  # np.array(Hs, Ks, Ls) for further computations

    # print "DATAQ in xy_from_Quat", DATAQ
    if UBmatrix is not None:
        R = UBmatrix
        # q = UB * B0 * Q
        trQ = np.dot(np.dot(R, B0matrix), trQ)  # results are qx, qy, qz
    else:
        print("I DON'T LIKE INITROT == None")
        print("this must mean that INITROT = Identity ?...")

    Qrot = trQ  # lattice rotation due to quaternion
    Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0))  # norms of Q vectors

    twthe, chi = F2TC.from_qunit_to_twchi(Qrot / Qrotn, labXMAS=labXMAS)
    # print "twthe, chi", twthe, chi
    if verbose:
        print("tDATA_Q", np.transpose(DATA_Q))
        print("Qrot", Qrot)
        print("Qrotn", Qrotn)
        print("Qrot/Qrotn", Qrot / Qrotn)
        print("twthe,chi", twthe, chi)

    X, Y, theta = F2TC.calc_xycam_from2thetachi(
        twthe, chi, calibration_parameters, offset=offset, verbose=0,
        pixelsize=pixelsize, kf_direction=kf_direction)

    return X, Y, theta, R
243bdb74da8aa429a0748d6d15b6b9d1f20814f3
3,649,701
def load_csv(path):
    """
    Function for importing data from csv. Function uses the weka
    implementation of CSVLoader.

    :param path: input file
    :return: weka arff data
    """
    args, _sufix = csv_loader_parser()
    loader = Loader(classname='weka.core.converters.CSVLoader',
                    options=args_to_weka_options(args, _sufix))
    return loader.load_file(path)
d7555cbe5e54543ca8f66e5f70d4f20b7b72b549
3,649,702
import json
from io import BytesIO

import numpy as np


def from_stream(stream, storage, form):
    """Reverses to_stream, returning data"""
    if storage == "pure-plain":
        assert isinstance(stream, str)
        if isinstance(stream, str):
            txt = stream
        else:
            assert not stream.startswith(MAGIC_SEAMLESS)
            assert not stream.startswith(MAGIC_NUMPY)
            txt = stream.decode("utf-8")
        result = json.loads(txt)
        return result
    elif storage == "pure-binary":
        b = BytesIO(stream)
        arr0 = np.load(b, allow_pickle=False)
        if arr0.ndim == 0 and arr0.dtype.char != "S":
            arr = np.frombuffer(arr0, arr0.dtype)
            return arr[0]
        else:
            return arr0
    assert stream.startswith(MAGIC_SEAMLESS)
    l = len(MAGIC_SEAMLESS)
    s1 = stream[l:l + 8]
    s2 = stream[l + 8:l + 16]
    len_jsons = np.frombuffer(s1, dtype=np.uint64).tolist()[0]
    buffersize = np.frombuffer(s2, dtype=np.uint64).tolist()[0]
    assert len(stream) == l + 16 + len_jsons + buffersize
    bytes_jsons = stream[l + 16:l + 16 + len_jsons]
    jsons = json.loads(bytes_jsons.decode("utf-8"))
    bytebuffer = stream[l + 16 + len_jsons:]
    buffer = np.frombuffer(bytebuffer, dtype=np.uint8)
    data = _from_stream(None, storage, form, jsons, buffer)
    return data
ee8c657162947354b3533b4fe607a11a8a6457ec
3,649,703
import numpy as np


def UniformExploration(j, state):
    """Fake player j that always targets all arms."""
    return list(np.arange(state.K))
146b84ff0d9e28a8316e871b94e5bb82d67de997
3,649,705
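# Quick check of UniformExploration with a minimal stand-in state object
# (hypothetical: the real state class comes from the surrounding bandit
# simulation and only needs a K attribute here).
from types import SimpleNamespace

state = SimpleNamespace(K=4)
print(UniformExploration(0, state))  # [0, 1, 2, 3]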
import numpy as np


def deduction_limits(data):
    """
    Apply limits on itemized deductions
    """
    # Split charitable contributions into cash and non-cash using ratio in PUF
    cash = 0.82013
    non_cash = 1. - cash
    data['e19800'] = data['CHARITABLE'] * cash
    data['e20100'] = data['CHARITABLE'] * non_cash

    # Apply student loan interest deduction limit
    data['e03210'] = np.where(data.SLINT > 2500, 2500, data.SLINT)

    # Apply IRA contribution limit
    deductible_ira = np.where(data.AGE >= 50,
                              np.where(data.ADJIRA > 6500, 6500, data.ADJIRA),
                              np.where(data.ADJIRA > 5500, 5500, data.ADJIRA))
    data['e03150'] = deductible_ira

    return data
ef1a6464e4bb0832a9398ad01e878c8aa4e6a620
3,649,706
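# Sketch of deduction_limits on a toy DataFrame; the column names follow the
# function body, the values are made up.
import pandas as pd

toy = pd.DataFrame({'CHARITABLE': [1000.0, 0.0],
                    'SLINT': [3000.0, 1200.0],
                    'AGE': [55, 30],
                    'ADJIRA': [7000.0, 6000.0]})
out = deduction_limits(toy)
# e03210 is capped at 2500; e03150 is capped at 6500 (age 50+) or 5500
print(out[['e19800', 'e20100', 'e03210', 'e03150']])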
import numpy as np


def select_interacting(num_mtx, bin_mtx, labels):
    """
    Auxiliary function for fit_msa_models. Used for fitting the models in
    hard EM; selects observations with a hidden variable value of 1.
    """
    if labels is None:
        # This is the case when initializing the models
        return num_mtx, bin_mtx, labels
    else:
        # This is the case inside the EM loop
        labels = np.asarray(labels)
        idxs = np.where(labels == 1)[0]
        int_num = np.take(num_mtx, idxs, axis=0)
        int_bin = np.take(bin_mtx, idxs, axis=0)
        weights = np.take(labels, idxs)
        return int_num, int_bin, weights
4c4f6fd7b44d4c388f7ce9ba30e91886548c85ee
3,649,708
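# Minimal demonstration of select_interacting: labels mark which rows are kept.
import numpy as np

num = np.arange(12).reshape(4, 3)
bin_ = (num % 2).astype(bool)
labels = [1, 0, 1, 0]
int_num, int_bin, weights = select_interacting(num, bin_, labels)
print(int_num.shape, weights)  # (2, 3) [1 1]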
def _GenDiscoveryDoc(service_class_names, doc_format,
                     output_path, hostname=None,
                     application_path=None):
  """Write discovery documents generated from a cloud service to file.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service names.
    doc_format: The requested format for the discovery doc. (rest|rpc)
    output_path: The directory to output the discovery docs to.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.
    application_path: A string containing the path to the AppEngine app.

  Raises:
    ServerRequestException: If fetching the generated discovery doc fails.

  Returns:
    A list of discovery doc filenames.
  """
  output_files = []
  service_configs = GenApiConfig(service_class_names, hostname=hostname,
                                 application_path=application_path)
  for api_name_version, config in service_configs.items():
    discovery_doc = _FetchDiscoveryDoc(config, doc_format)
    discovery_name = api_name_version + '.discovery'
    output_files.append(_WriteFile(output_path, discovery_name,
                                   discovery_doc))

  return output_files
a135c3805ee5b81e2f0f505b1710531e3db7d1f1
3,649,709
import dateutil.parser
import ephem


def nitestr(nite, sep=''):
    """
    Convert an ephem.Date object to a nite string.

    Parameters:
    -----------
    nite     : ephem.Date object (or a date string)
    sep      : Output separator

    Returns:
    --------
    nitestr  : String representation of the nite
    """
    if isinstance(nite, str):
        nite = dateutil.parser.parse(nite)
    nite = ephem.Date(nite)
    strtuple = nite.tuple()[:3]
    nitestr = '{:4d}{sep}{:02d}{sep}{:02d}'
    nitestr = nitestr.format(*strtuple, sep=sep)
    return nitestr
493f34ad484bbd350254c45e38c41a9559a2ce14
3,649,710
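# Example calls for nitestr; requires the ephem and python-dateutil packages.
import ephem

print(nitestr('2024-01-15'))                      # '20240115'
print(nitestr(ephem.Date('2024/1/15'), sep='-'))  # '2024-01-15'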
def dump_tuple(tup): """ Dump a tuple to a string of fg,bg,attr (optional) """ return ','.join(str(i) for i in tup)
ffa4838e2794da9d525b60f4606633f8940480bb
3,649,712
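# Example for dump_tuple with a (fg, bg, attr) style tuple.
print(dump_tuple(('cyan', 'default', 'bold')))  # 'cyan,default,bold'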
from math import floor, log10

import numexpr as ne
import numpy as np


def generate_fermi_question(cfg, logratio, filter_single_number_lhs=True):
    """
    Generates one Fermi question.

    Args:
      cfg: Expression config
      logratio: Log ratio standard deviation (for RHS)
      filter_single_number_lhs: Whether to exclude lhs of a single numerical
        term
    """
    done = False
    rhs_limit = 10**15
    while not done:
        lhs = generate_expression(cfg)
        L = ne.evaluate(lhs['numerical'])
        if L > rhs_limit:
            continue
        if filter_single_number_lhs:
            if len(lhs['quantity_ids']) == 0 and lhs['length'] <= 1:
                continue
        # Always sample the rhs from a lognormal with a larger variance the
        # larger the number is
        if L == 0:
            R = 0
            while R == 0:  # Loop until we get an R != L
                R = int(np.random.normal(0, 1))
        else:
            R = L
            while R == L:  # Loop until we hit an R != L
                # Set the variance of the log RHS so that it grows as the
                # quantity gets bigger
                sd = 0.1 + log10(abs(L)) * 0.065 + log10(abs(L))**2 * 0.0042
                R_raw = sample_lognormal(L, sd)
                # Then round to 3 sig. figures
                if R_raw != 0:
                    R = int(round(R_raw, -int(floor(log10(abs(R_raw)))) + 2))
                else:
                    R = 0
        assert R != L
        try:
            R = ne.evaluate(str(R))
            done = True
        except Exception:
            pass

    question = lhs['expression'] + ' < ' + "{:,}".format(int(R))
    numerical = lhs['numerical'] + ' < ' + str(R)
    fermi_question = FermiQuestion(lhs['length'], question, numerical,
                                   lhs['estimation_difficulty'],
                                   lhs['quantity_ids'], lhs['categories'],
                                   lhs['quantity_strings'])
    return fermi_question
47ebeaa4389f7371fb56687788a696aa7e03426e
3,649,714
def build_dataset(cfg, default_args=None): """Build a dataset from config dict. Args: cfg (dict): Config dict. It should at least contain the key "type". default_args (dict | None, optional): Default initialization arguments. Default: None. Returns: Dataset: The constructed dataset. """ dataset = build_from_cfg(cfg, DATASETS, default_args) return dataset
12b7d9121b9395668b8d260790b980260e1e4ee5
3,649,715
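# Hedged sketch of a typical config for build_dataset, following the
# mmcv/mmdet-style registry convention the function appears to use; the
# dataset type and paths are placeholders and must exist in the DATASETS
# registry of the surrounding project.
cfg = dict(
    type='CocoDataset',                # registered dataset class name
    ann_file='data/annotations.json',  # placeholder path
    pipeline=[],                       # preprocessing pipeline
)
dataset = build_dataset(cfg)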
def Packet_genReadUserTag(errorDetectionMode, buffer, size): """Packet_genReadUserTag(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t""" return _libvncxx.Packet_genReadUserTag(errorDetectionMode, buffer, size)
111c391ee3bbef36e1d42666d608c72fb3e6c3cd
3,649,716
def prefetch(tensor_dict, capacity): """Creates a prefetch queue for tensors. Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a dequeue op that evaluates to a tensor_dict. This function is useful in prefetching preprocessed tensors so that the data is readily available for consumers. Example input pipeline when you don't need batching: ---------------------------------------------------- key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) tensor_dict = prefetch_queue.dequeue() outputs = Model(tensor_dict) ... ---------------------------------------------------- For input pipelines with batching, refer to core/batcher.py Args: tensor_dict: a dictionary of tensors to prefetch. capacity: the size of the prefetch queue. Returns: a FIFO prefetcher queue """ names = tensor_dict.keys() dtypes = [t.dtype for t in tensor_dict.values()] shapes = [t.get_shape() for t in tensor_dict.values()] prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, shapes=shapes, names=names, name='prefetch_queue') enqueue_op = prefetch_queue.enqueue(tensor_dict) tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( prefetch_queue, [enqueue_op])) tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), tf.to_float(prefetch_queue.size()) * (1. / capacity)) return prefetch_queue
44320c6ea24d42b9add4bd3a3677b157ffcd24b8
3,649,717
def get_total_shares(): """ Returns a list of total shares (all, attending, in person, represented) for all voting principles. """ total_shares = { 'heads': [0, 0, 0, 0] # [all, attending, in person, represented] } principle_ids = VotingPrinciple.objects.values_list('id', flat=True) for principle_id in principle_ids: total_shares[principle_id] = [Decimal(0), Decimal(0), Decimal(0), Decimal(0)] # Query delegates. delegates = User.objects.filter(groups=2).select_related('votingproxy', 'keypad').prefetch_related('shares') shares_exists = VotingShare.objects.exists() for delegate in delegates: # Exclude delegates without shares -- who may only serve as proxies. if shares_exists and delegate.shares.count() == 0: continue total_shares['heads'][0] += 1 # Find the authorized voter. auth_voter = find_authorized_voter(delegate) # If auth_voter is delegate himself set index to 2 (in person) else 3 (represented). i = 2 if auth_voter == delegate else 3 attending = auth_voter is not None and auth_voter.is_present if config['voting_enable_votecollector']: attending = attending and hasattr(auth_voter, 'keypad') if attending: total_shares['heads'][i] += 1 # Add shares to total. for share in delegate.shares.all(): total_shares[share.principle_id][0] += share.shares if attending: total_shares[share.principle_id][i] += share.shares for k in total_shares.keys(): total_shares[k][1] = total_shares[k][2] + total_shares[k][3] return total_shares
c70ca3be6e0b7b9df03257b81f5abec343efa37e
3,649,718
def gen_check_box_idx(): """ Generate a list containing the coordinate of three finder patterns in QR-code Args: None Returns: idx_check_box: a list containing the coordinate each pixel of the three finder patterns """ idx_check_box = [] for i in range(7): idx_check_box.append((0, i)) idx_check_box.append((6, i)) idx_check_box.append((30, i)) idx_check_box.append((36, i)) idx_check_box.append((0, 30+i)) idx_check_box.append((6, 30+i)) for i in range(1, 6): idx_check_box.append((i, 0)) idx_check_box.append((i, 6)) idx_check_box.append((i, 30)) idx_check_box.append((i, 36)) idx_check_box.append((30+i, 0)) idx_check_box.append((30+i, 6)) for i in range(3): for j in range(3): idx_check_box.append((2+i, 2+j)) idx_check_box.append((32+i, 2+j)) idx_check_box.append((2+i, 32+j)) return idx_check_box
e26d9c5a3b093b52f54eb2c65b844215c40ddab8
3,649,719
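# Paint the coordinates returned by gen_check_box_idx into a 37x37 grid to
# visualise the three finder-pattern outlines (grid size inferred from the
# maximum index of 36).
import numpy as np

grid = np.zeros((37, 37), dtype=int)
for r, c in gen_check_box_idx():
    grid[r, c] = 1
print(grid.sum())  # number of marked finder-pattern pixels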
import pandas


def preprocess_mc_parameters(n_rv, dict_safir_file_param, index_column='index'):
    """
    NAME: preprocess_mc_parameters
    AUTHOR: Ian Fu
    DATE: 18 Oct 2018
    DESCRIPTION:
    Takes a dictionary object in which each item represents a SAFIR input
    variable, distributed or static; a distributed input parameter must be a
    dictionary object describing a distribution (see usage).

    PARAMETERS:
    :param n_rv: int, number of random samples for distributed parameters
    :param dict_safir_file_param: dict, safir input (problem definition) file
        parameterised variable names
    :param index_column: str, name of the index column of the returned
        DataFrame
    :return df_params: pandas.DataFrame with n_rv rows and the items in
        dict_safir_file_param as columns

    USAGE:
    """

    # declare containers
    dict_result_params_static = dict()  # container for storing static parameters
    dict_result_params_dist = dict()  # container for storing distributed random parameters

    # separate static and distributed parameters
    for key_, each_param in dict_safir_file_param.items():
        if isinstance(each_param, dict):
            dict_result_params_dist[key_] = each_param
        elif isinstance(each_param, list):
            # a pre-sampled list is only usable if its length matches n_rv
            if len(each_param) == n_rv:
                dict_result_params_dist[key_] = each_param
            else:
                raise ValueError(
                    'List parameter {} has length {}, expected {}'.format(
                        key_, len(each_param), n_rv))
        else:
            dict_result_params_static[key_] = [each_param] * n_rv

    # make distribution random parameters
    dict_result_params_dist = preprocess_safir_mc_parameters(n_rv, dict_result_params_dist)

    # merge random distributed and static parameters
    dict_result_params = {**dict_result_params_static, **dict_result_params_dist}

    # make pandas.DataFrame
    if index_column not in dict_result_params:
        dict_result_params[index_column] = list(range(n_rv))
    df_params = pandas.DataFrame(dict_result_params)
    df_params.set_index(index_column, inplace=True)

    return df_params
28d04122234572b57d978fc9e993707cea45a00d
3,649,720
def AdjustColour(color, percent, alpha=wx.ALPHA_OPAQUE): """ Brighten/Darken input colour by percent and adjust alpha channel if needed. Returns the modified color. @param color: color object to adjust @param percent: percent to adjust +(brighten) or -(darken) @keyword alpha: amount to adjust alpha channel """ radj, gadj, badj = [ int(val * (abs(percent) / 100.0)) for val in color.Get() ] if percent < 0: radj, gadj, badj = [ val * -1 for val in [radj, gadj, badj] ] else: radj, gadj, badj = [ val or 255 for val in [radj, gadj, badj] ] red = min(color.Red() + radj, 255) green = min(color.Green() + gadj, 255) blue = min(color.Blue() + badj, 255) return wx.Colour(red, green, blue, alpha)
76ca657e632467c5db730161a34f19633add06f4
3,649,721
from astropy.time import Time


def getdates(startdate, utc_to_local, enddate=None):
    """
    Generate '~astropy.time.Time' objects corresponding to 16:00:00 local time
    on the evenings of the first and last nights of the scheduling period.

    Parameters
    ----------
    startdate : str or None
        Start date (eg. 'YYYY-MM-DD').  If None, defaults to current date.
    utc_to_local : '~astropy.unit' hours
        Time difference between utc and local time.
    enddate : str or None
        End date (eg. 'YYYY-MM-DD').  If None, defaults to day after start
        date.

    Returns
    -------
    start : '~astropy.time.core.Time'
        UTC corresponding to 16:00 local time on first night
    end : '~astropy.time.core.Time'
        UTC corresponding to 16:00 local time on last night
    """
    if startdate is None:
        current_utc = Time.now()
        start = Time(str((current_utc + utc_to_local).iso)[0:10] + ' 16:00:00.00') - utc_to_local
    else:
        try:
            start = Time(startdate + ' 16:00:00.00') - utc_to_local
        except ValueError as e:
            print(e)
            raise ValueError('\"{}\" not a valid date. Expected string of the form \'YYYY-MM-DD\''.format(startdate))

    if enddate is None:  # default number of observation nights is 1
        return start, None
    else:
        try:
            end = Time(enddate + ' 16:00:00.00') - utc_to_local
        except ValueError as e:
            print(e)
            raise ValueError('\"{}\" not a valid date. Expected string of the form \'YYYY-MM-DD\''.format(enddate))
        diff = int((end - start).value)  # difference between startdate and enddate
        if diff <= 0:
            raise ValueError('End date \"{}\" occurs before or on start date.'.format(enddate))
        start.format = 'jd'
        end.format = 'jd'
        return start, end
1bee7b83b2b4ce3f3347544441762287a3ff1c83
3,649,722
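# Hedged example for getdates: utc_to_local is an astropy quantity giving the
# site's UTC offset (-10 h here is a made-up value).
import astropy.units as u

start, end = getdates('2024-01-01', -10 * u.hour, enddate='2024-01-05')
print(start.iso, end.iso)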
def get_dataframe_tail(n):
    """ Returns the last n rows of the module-level `dataset` DataFrame."""
    return dataset.tail(n)
03a01a9535da25d30c394a8339ebbd5bd0a80b03
3,649,724
import json


def json_formatter(result, verbose=False, indent=4, offset=0):
    """Format result as json.

    :param result: object to serialise
    :param verbose: currently unused
    :param indent: indentation width passed to json.dumps
    :param offset: number of spaces to shift continuation lines by
    """
    string = json.dumps(result, indent=indent)
    string = string.replace("\n", "\n" + " " * offset)
    return string
512847722fa36eff408ac28d6e3dc8fde5c52af1
3,649,725
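# json_formatter example: offset shifts continuation lines, which is handy
# when embedding the JSON inside already-indented report output.
print(json_formatter({'a': 1, 'b': [1, 2]}, offset=4))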
import torch.nn.functional as F
from torch.autograd import Variable


def _gumbel_softmax_sample(logits, temp=1, eps=1e-20):
    """
    Draw a sample from the Gumbel-Softmax distribution

    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    dims = logits.dim()
    gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.data.new())
    y = logits + Variable(gumbel_noise)
    return F.softmax(y / temp, dims - 1)
a61eac3861cdca17c1d2856c83bc70e03168bc45
3,649,727
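# Sketch for _gumbel_softmax_sample with a minimal stand-in for the external
# _sample_gumbel helper (hypothetical implementation of the standard
# Gumbel(0, 1) inverse-CDF trick).
import torch

def _sample_gumbel(shape, eps=1e-20, out=None):
    u = out.resize_(shape).uniform_() if out is not None else torch.rand(shape)
    return -torch.log(-torch.log(u + eps) + eps)

logits = torch.randn(2, 5)
probs = _gumbel_softmax_sample(logits, temp=0.5)
print(probs.sum(dim=-1))  # each row sums to ~1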
import numpy as np
from scipy.interpolate import griddata

# module-level switches; the original relied on globals defined elsewhere,
# so these defaults are assumptions
VERBOSE = False
INTERPOLATION_LEVEL = "linear"


def interpolate_points(variable, variable_name, old_r, old_theta, new_r, new_theta):
    """Interpolate the old grid onto the new grid."""
    grid = griddata(
        (old_r, old_theta), variable, (new_r, new_theta),
        method=INTERPOLATION_LEVEL, fill_value=-1
    )

    n_error = 0
    for i, element in enumerate(grid):
        if element == -1:  # fill_value marks points outside the convex hull
            n_error += 1
            grid[i] = griddata(
                (old_r, old_theta), variable, (new_r[i], new_theta[i]),
                method="nearest"
            )
            if VERBOSE:
                print(f"{variable_name} interpolation problem at r = {new_r[i]} theta = {np.rad2deg(new_theta[i])}")

    if n_error:
        print(f"There were {n_error} interpolation errors for {variable_name}")

    return grid
2a830e7cd04d5d0832d35a25bc58041ba192709b
3,649,728
def currentProgram(): """currentProgram page.""" return render_template( "currentProgram-index.j2.html", title="currentProgram", subtitle="Demonstration of Flask blueprints in action.", template="currentProgram-template", currentProgram=getCurrentProgr(), timeStarted=timeStarted, )
f5c914560d3c1791e34749321b78951313d5f058
3,649,730
def checkIsMember(request):
    """
    Looks up a business registration number and checks whether it is
    registered as a linked Popbill member.
    - https://docs.popbill.com/statement/python/api#CheckIsMember
    """
    try:
        # Business registration number to look up: 10 digits, without '-'
        targetCorpNum = "1234567890"

        response = statementService.checkIsMember(targetCorpNum)

        return render(request, 'response.html', {'code': response.code, 'message': response.message})
    except PopbillException as PE:
        return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
861d4cc83102e036bb795bf8eafee2f7593925a4
3,649,731
from typing import List from typing import Optional from typing import Type def f1_score(y_true: List[List[str]], y_pred: List[List[str]], *, average: Optional[str] = 'micro', suffix: bool = False, mode: Optional[str] = None, sample_weight: Optional[List[int]] = None, zero_division: str = 'warn', scheme: Optional[Type[Token]] = None, partial_match: bool = False): """Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. average : string, [None, 'micro' (default), 'macro', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. sample_weight : array-like of shape (n_samples,), default=None Sample weights. zero_division : "warn", 0 or 1, default="warn" Sets the value to return when there is a zero division: - recall: when there are no positive labels - precision: when there are no positive predictions - f-score: both If set to "warn", this acts as 0, but warnings are also raised. mode : str, [None (default), `strict`]. if ``None``, the score is compatible with conlleval.pl. Otherwise, the score is calculated strictly. scheme : Token, [IOB2, IOE2, IOBES] suffix : bool, False by default. partial_match : bool, False by default. Returns: score : float or array of float, shape = [n_unique_labels]. Example: >>> from seqeval.metrics import f1_score >>> y_true = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> f1_score(y_true, y_pred, average='micro') 0.6666666666666666 >>> f1_score(y_true, y_pred, average='macro') 0.75 >>> f1_score(y_true, y_pred, average='weighted') 0.6666666666666666 >>> f1_score(y_true, y_pred, average=None) array([0.5, 1. ]) """ if mode == 'strict' and scheme: _, _, f, _ = precision_recall_fscore_support_v1(y_true, y_pred, average=average, warn_for=('f-score',), beta=1, sample_weight=sample_weight, zero_division=zero_division, scheme=scheme, suffix=suffix ) else: _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, average=average, warn_for=('f-score',), beta=1, sample_weight=sample_weight, zero_division=zero_division, suffix=suffix, partial_match=partial_match) return f
1354e306847af1decf59ae638a1cdecd265e569a
3,649,732
from typing import Any from typing import Counter def calc_proportion_identical(lst: Any) -> float: """ Returns a value between 0 and 1 for the uniformity of the values in LST, i.e. higher if they're all the same. """ def count_most_common(lst): """ Find the most common item in LST, and count how many times it occurs. """ # Counter(['a', 'b', 'a']).most_common(2) -> [ # ('a', 2), # ('b', 1), # ] # so this gives the count of the most common (in this case 2 occurrences of 'a') return Counter(lst).most_common(1)[0][1] most_common = count_most_common(lst) if most_common == 1: return 0 else: return most_common / len(lst)
adf467eba11694c5ea4583d7b53029110e59e25a
3,649,733
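# Two worked cases for calc_proportion_identical: all-distinct values score 0,
# a 2-of-3 majority scores 2/3.
print(calc_proportion_identical(['a', 'b', 'c']))  # 0
print(calc_proportion_identical(['a', 'b', 'a']))  # 0.666...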
import numpy as np


def _rolling_mad(arr, window):
    """Rolling window MAD outlier detection on 1d array."""
    outliers = []
    for i in range(window, len(arr)):
        cur = arr[(i - window) : i]
        med, cur_mad = _mad(cur)
        cur_out = cur > (med + cur_mad * 3)
        idx = list(np.arange((i - window), i)[cur_out])
        outliers += idx
    outliers = list(set(outliers))
    # turn index list into a boolean mask
    bool_outliers = np.zeros(arr.shape[0], dtype=bool)
    bool_outliers[outliers] = True
    return bool_outliers
3f28dde448b3c567a92fd22e499f812cd748a507
3,649,734
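# Sketch for _rolling_mad with a stand-in for the external _mad helper
# (hypothetical: assumed to return the median and the median absolute
# deviation of the window).
import numpy as np

def _mad(x):
    med = np.median(x)
    return med, np.median(np.abs(x - med))

rng = np.random.default_rng(0)
arr = rng.normal(0, 1, 200)
arr[150] = 25.0  # inject a spike
mask = _rolling_mad(arr, window=50)
print(np.where(mask)[0])  # includes index 150 (plus any mild false positives)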
import numpy as np


def compute_mean_and_cov(embeds, labels):
    """Computes class-specific means and the shared covariance matrix of the given embedding.

    The computation follows Eq (1) in [1].

    Args:
      embeds: An np.array of size [n_train_sample, n_dim], where
        n_train_sample is the sample size of the training set and n_dim is
        the dimension of the embedding.
      labels: An np.array of size [n_train_sample, ]

    Returns:
      mean_list: A list of length n_class; the i-th element is an np.array of
        size [n_dim, ] corresponding to the mean of the fitted Gaussian
        distribution for the i-th class.
      cov: The shared covariance matrix of size [n_dim, n_dim].
    """
    n_dim = embeds.shape[1]
    n_class = int(np.max(labels)) + 1

    mean_list = []
    cov = np.zeros((n_dim, n_dim))
    for class_id in range(n_class):
        data = embeds[labels == class_id]
        data_mean = np.mean(data, axis=0)
        cov += np.dot((data - data_mean).T, (data - data_mean))
        mean_list.append(data_mean)
    cov = cov / len(labels)
    return mean_list, cov
b6d5624b0cea9f6162ffad819d4d5917391ac73e
3,649,735
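# Toy run of compute_mean_and_cov on two synthetic 2-D clusters.
import numpy as np

rng = np.random.default_rng(0)
embeds = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
labels = np.array([0] * 50 + [1] * 50)
means, cov = compute_mean_and_cov(embeds, labels)
print(np.round(means[0], 1), np.round(means[1], 1), cov.shape)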
def wcenergy(seq: str, temperature: float, negate: bool = False) -> float: """Return the wc energy of seq binding to its complement.""" loop_energies = calculate_loop_energies_dict(temperature, negate) return sum(loop_energies[seq[i:i + 2]] for i in range(len(seq) - 1))
90eae3d85e90019571e4f5d674fb93d58c1d7287
3,649,736
def upload_csv(): """ Upload csv file """ upload_csv_form = UploadCSVForm() if upload_csv_form.validate_on_submit(): file = upload_csv_form.csv.data ClassCheck.process_csv_file(file) flash('CSV file uploaded!', 'success') return redirect('/')
620cc3b4e11c71fe9dedd24631f641304313150f
3,649,738
# Request/RedirectResponse come from Starlette (also re-exported by FastAPI)
from starlette.requests import Request
from starlette.responses import RedirectResponse


async def clean(request: Request) -> RedirectResponse:
    """Access this view (GET "/clean") to remove all session contents."""
    request.session.clear()
    return RedirectResponse("/")
3ef0d9298fcd7879becc8ae246656a62086f639a
3,649,739