Columns:
    content : string (length 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
import typing


def func_xy_args_kwargs_annotate(
    x: "0", y, *args: "2", **kwargs: "4"
) -> typing.Tuple:
    """func.

    Parameters
    ----------
    x, y: float
    args: tuple
    kwargs: dict

    Returns
    -------
    x, y: float
    args: tuple
    kwargs: dict
    """
    return x, y, None, None, args, None, None, kwargs
41d06b792ac3d794e1c0ea8bedc1708bb5b4e969
3,654,391
import numpy as np
import torch


def mp_nerf_torch(a, b, c, l, theta, chi):
    """
    Custom Natural extension of Reference Frame.
    Inputs:
    * a: (batch, 3) or (3,). point(s) of the plane, not connected to d
    * b: (batch, 3) or (3,). point(s) of the plane, not connected to d
    * c: (batch, 3) or (3,). point(s) of the plane, connected to d
    * theta: (batch,) or (float). angle(s) between b-c-d
    * chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
    Outputs: d (batch, 3) or (float). the next point in the sequence, linked to c
    """
    # safety check
    if not ((-np.pi <= theta) * (theta <= np.pi)).all().item():
        raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
    # calc vecs
    ba = b - a
    cb = c - b
    # calc rotation matrix. based on plane normals and normalized
    n_plane = torch.cross(ba, cb, dim=-1)
    n_plane_ = torch.cross(n_plane, cb, dim=-1)
    rotate = torch.stack([cb, n_plane_, n_plane], dim=-1)
    rotate /= torch.norm(rotate, dim=-2, keepdim=True)
    # calc proto point, rotate. add (-1 for sidechainnet convention)
    # https://github.com/jonathanking/sidechainnet/issues/14
    d = torch.stack([-torch.cos(theta),
                     torch.sin(theta) * torch.cos(chi),
                     torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
    # extend base point, set length
    return c + l.unsqueeze(-1) * torch.matmul(rotate, d).squeeze()
2c42339455f6549e87488d12dec44282a6570d63
3,654,392
def makemarkers(nb):
    """ Give a list of cycling markers. See http://matplotlib.org/api/markers_api.html

    .. note:: This is what I consider the *optimal* sequence of markers, they are clearly differentiable one from another and all are pretty.

    Examples:
    >>> makemarkers(7)
    ['o', 'D', 'v', 'p', '<', 's', '^']
    >>> makemarkers(12)
    ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>', 'o', 'D']
    """
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers))))  # Cycle the good number of times
    return longlist[:nb]
a1dc00cdb831b3b622670a5f36ba956273379b16
3,654,393
import types
import typing


def uselist(*, schema: types.Schema, schemas: types.Schemas) -> typing.Optional[bool]:
    """
    Retrieve the x-uselist of the schema.

    Raises MalformedSchemaError if the x-uselist value is not a boolean.

    Args:
        schema: The schema to get x-uselist from.
        schemas: The schemas for $ref lookup.

    Returns:
        The x-uselist or None.
    """
    value = peek_key(
        schema=schema, schemas=schemas, key=types.ExtensionProperties.USELIST
    )
    if value is None:
        return None
    if not isinstance(value, bool):
        raise exceptions.MalformedSchemaError(
            "The x-uselist property must be of type boolean."
        )
    return value
eea45ef82a2d2715473a7a2203dcfdef1e958805
3,654,395
def getIPRules():
    """
    Fetches a json representation of the Iptables rules on the server

    GET: json object with all the iptables rules on the system
    """
    return jsonify({"result": True, "rules": hl.getIptablesRules()})
5b91978c0329105ff85f02deeccce707182b5551
3,654,396
def _get_only_relevant_data(video_data):
    """ Method to build ES document with only the relevant information """
    return {
        "kind": video_data["kind"],
        "id": video_data["id"],
        "published_at": video_data["snippet"]["publishedAt"],
        "title": video_data["snippet"]["title"],
        "description": video_data["snippet"]["description"],
        "thumbnail_url": video_data["snippet"]["thumbnails"]["default"]["url"],
        "channel_title": video_data["snippet"]["channelTitle"],
    }
b5d2a0cf2c5b7121c92e95adb524379d7cf3eb9c
3,654,397
import cv2


def get_mask(img):
    """ Convert an image to a mask array. """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
    return mask
255e396b12f61dfe9d98fcf9c89fdf6b486a7a95
3,654,398
def b32encode(hex_values, pad_left=True):
    """ Base32 encoder algorithm for Nano.

    Transforms the given hex_value into a base-32 representation. The allowed letters are:
        "13456789abcdefghijkmnopqrstuwxyz"

    :param hex_values:
        Hexadecimal values (string) or byte array containing the data to be encoded.

    :param pad_left:
        True if a byte of 0s should be prepended to the input. False otherwise.
        This padding is required when generating a nanoblocks address with this algorithm.
    """
    if type(hex_values) is str:
        data_bytes = int(hex_values, 16).to_bytes(32, "big")
    else:
        data_bytes = hex_values

    data_binary = ("0000" if pad_left else "") + "".join([f'{p:08b}' for p in data_bytes])
    data_encoded = [int(split, 2) for split in chunkize(data_binary, 5)]

    return "".join(pub_key_map.iloc[data_encoded].tolist())
7a58b56ad4d3733da6d7c211bab470d8cde63e9c
3,654,399
import numpy as np


def get_mfp(g, gv):
    """Calculate mean free path from inverse lifetime and group velocity."""
    g = np.where(g > 0, g, -1)
    gv_norm = np.sqrt((gv ** 2).sum(axis=2))
    mean_freepath = np.where(g > 0, gv_norm / (2 * 2 * np.pi * g), 0)
    return mean_freepath
bcef3e92de1b81a8688b3a732dd7af0dd9ce6b8c
3,654,400
import gmpy2


def find_prime_root(l, blum=True, n=1):
    """Find smallest prime of bit length l satisfying given constraints.

    Default is to return Blum primes (primes p with p % 4 == 3).
    Also, a primitive root w is returned of prime order at least n.
    """
    if l == 1:
        assert not blum
        assert n == 1
        p = 2
        w = 1
    elif n <= 2:
        n = 2
        w = -1
        p = gmpy2.next_prime(2**(l - 1))
        if blum:
            while p % 4 != 3:
                p = gmpy2.next_prime(p)
        p = int(p)
    else:
        assert blum
        if not gmpy2.is_prime(n):
            n = int(gmpy2.next_prime(n))
        p = 1 + n * (1 + (n**2) % 4 + 4 * ((2**(l - 2)) // n))
        while not gmpy2.is_prime(p):
            p += 4 * n
        a = 1
        w = 1
        while w == 1:
            a += 1
            w = gmpy2.powmod(a, (p - 1) // n, p)
        p, w = int(p), int(w)
    return p, n, w
be2d465fdb8de45dc2574788c12b8f78f4601508
3,654,403
import json def set_parameters(_configs, new=False): """ Sets configuration parameters Parameters ---------- _configs : Dictionary containing configuration options from the config file (config.json) new : bool Do you want to start from a new file? Returns ------- _configs : Updated dictionary containing configuration options from the config file (config.json) """ if new: _configs = {x: "NA" for x in _configs} print('*Do not include single or double quotes*\n') if _configs['eye_mask_path'] == 'NA': _eye_mask_path = input('Add the full eye mask filepath: ') _configs['eye_mask_path'] = _eye_mask_path if _configs['train_file'] == 'NA': _train_file = input('Add the name of the file used for training [peer1.nii.gz]: ') if not _train_file: _configs['train_file'] = 'peer1.nii.gz' else: _configs['train_file'] = _train_file if _configs['test_file'] == 'NA': _test_file = input('Which file would you like to predict eye movements from? [movie.nii.gz]: ') if not _test_file: _configs['test_file'] = 'movie.nii.gz' else: _configs['test_file'] = _test_file if _configs['use_gsr'] == 'NA': _use_gsr = input('Use global signal regression? (y/n) [n]: ') if (not _use_gsr) or (_use_gsr == 'n'): _configs['use_gsr'] = "0" else: _configs['use_gsr'] = "1" if _configs['motion_scrub'] == 'NA': _use_ms = input('Use motion scrubbing? (y/n) [n]: ') if (not _use_ms) or (_use_ms == 'n'): _configs['use_ms'] = "0" _configs['motion_threshold'] = "0" _configs['motion_scrub'] = "Not implemented" elif _use_ms == 'y': _configs['use_ms'] = "1" _motion_scrub_filename = input('Add the filename of the CSV that contains the framewise displacement \ time series [motion_ts.csv]: ') if not _motion_scrub_filename: _configs['motion_scrub'] = 'motion_ts.csv' else: _configs['motion_scrub'] = _motion_scrub_filename _motion_threshold = input('Add a motion threshold for motion scrubbing [.2]: ') if not _motion_threshold: _configs['motion_threshold'] = ".2" else: _configs['motion_threshold'] = _motion_threshold with open('peer/config.json', 'w') as f: json.dump(_configs, f) return _configs
7c0d52f5a2ee5df9b54278162570606d684a6a64
3,654,404
def vgg16(mask_init='1s', mask_scale=1e-2, threshold_fn='binarizer', **kwargs):
    """VGG 16-layer model (configuration "D")."""
    model = VGG(make_layers(cfg['D'], mask_init, mask_scale, threshold_fn),
                mask_init, mask_scale, threshold_fn, **kwargs)
    return model
fa3a17460988a2c87ca63b287674b9836c7f69ac
3,654,407
def sort(X):
    """
    Return sorted elements of :param:`X` and array of corresponding sorted indices.

    :param X: Target vector.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    """
    assert 1 in X.shape, "X should be vector."
    X = X.flatten().tolist()[0]
    return sorted(X), sorted(list(range(len(X))), key=X.__getitem__)
a176e2538fd1c0042eefc6962d1b354b7b4ca736
3,654,408
def get_query(sf, query_text, verbose=True):
    """
    Returns a list of lists based on a SOQL query with the fields
    as the header column in the first list/row
    """
    # execute query for up to 2,000 records
    gc = sf.query(query_text)
    records = gc['records']
    if verbose:
        print('Reading from %s object' % records[0]['attributes']['type'], flush=True)
    headers = list(records[0].keys())[1:]  # get the headers
    return_table = [[record[heading] for heading in headers] for record in records]
    return_table.insert(0, headers)

    # the above is complete unless there are >2,000 records
    total_read_so_far = len(records)
    while not gc['done']:
        if verbose:
            print('Progress: {} records out of {}'.format(
                total_read_so_far, gc['totalSize']), flush=True)
        gc = sf.query_more(gc['nextRecordsUrl'], True)
        records = gc['records']
        total_read_so_far += len(records)
        next_table = [[record[heading] for heading in headers] for record in records]
        return_table.extend(next_table)

    return return_table
ea93b6652a2d455b368a831d8c6d6b4554023313
3,654,409
import io
import csv


def strip_blank(contents):
    """
    strip the redundant blank in file contents.
    """
    with io.StringIO(contents) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
        rows = []
        for row in csvreader:
            rows.append(",".join(['"{}"'.format(x.strip()) for x in row]))
        return "\n".join(rows)
d446f2123aa3cfe3b1966151f323fa1c4e41cb08
3,654,410
from uuid import uuid4


def generate_id() -> str:
    """Generates a UUID v4.

    :return: Hexadecimal string representation of the uuid.
    """
    return uuid4().hex
674d0bea01f9109e02af787435d7cee5c37f0a5a
3,654,411
def perms_of_length(n, length):
    """Return all permutations in :math:`S_n` of the given length (i.e., with the
    specified number of inversions).

    This uses the algorithm in
    `<http://webhome.cs.uvic.ca/~ruskey/Publications/Inversion/InversionCAT.pdf>`_.

    :param n: specifies the permutation group :math:`S_n`.
    :param length: number of inversions.
    :rtype: list of :class:`sage.Permutation`
    """
    result = []

    def gen(S, l, suffix=[]):
        if l == 0:
            result.append(Permutation(S + suffix))
            return
        n = len(S)
        bin = (n - 1) * (n - 2) / 2
        for i in range(n):
            if n - (i + 1) <= l <= bin + n - (i + 1):
                x = S[i]
                gen(S[0:i] + S[i + 1:], l - n + (i + 1), [x] + suffix)

    gen(S=list(range(1, n + 1)), l=length)
    return result
da18a1a8b2dad5a0084f3d557a2cc1018798d33e
3,654,412
import pandas as pd
from scipy import stats


def rank_by_entropy(pq, kl=True):
    """
    evaluate kl divergence, wasserstein distance
    wasserstein: http://pythonhosted.org/pyriemann/_modules/pyriemann/utils/distance.html
    """
    # to avoid Inf cases
    pq = pq + 0.0000001
    pq = pq / pq.sum(axis=0)
    if kl:
        # entropy actually can calculate KL divergence
        final = pq.iloc[:, :-1].apply(
            lambda x: stats.entropy(x, pq.iloc[:, -1], base=2), axis=0)
        label = 'KL'
    else:
        # JS divergence
        final = pq.iloc[:, :-1].apply(
            lambda x: JSD(x, pq.iloc[:, -1]), axis=0)
        label = 'JSD'

    final.sort_values(ascending=False, inplace=True)
    rank = final.rank(ascending=False)
    final = pd.concat([final, rank], axis=1)
    final.columns = [label, 'rank']
    return final
0b47e2ba8de66148a50dbb1b4637897ac7bdee4b
3,654,413
import networkx as nx
import numpy as np


def generate_graph_properties(networks):
    """
    This function constructs lists with centrality rankings of nodes in multiple networks.
    Instead of using the absolute degree or betweenness centrality, this takes metric bias into account.
    If the graph is not connected, the values are calculated for the largest connected component.

    :param networks: List of input networks
    :return: Pandas dataframe with rankings
    """
    properties = dict()
    property_names = ['Assortativity', 'Connectivity', 'Diameter', 'Radius', 'Average shortest path length']
    for property in property_names:
        properties[property] = list()
    for network in networks:
        if len(network[1].nodes) > 0:
            properties['Assortativity'].append((network[0], nx.degree_pearson_correlation_coefficient(network[1])))
            properties['Connectivity'].append((network[0], nx.average_node_connectivity(network[1])))
            if nx.is_connected(network[1]):
                properties['Diameter'].append((network[0], nx.diameter(network[1])))
                properties['Radius'].append((network[0], nx.radius(network[1])))
                properties['Average shortest path length'].append((network[0], nx.average_shortest_path_length(network[1])))
            else:
                components = list(nx.connected_components(network[1]))
                sizes = []
                for component in components:
                    sizes.append(len(component))
                subnetwork = nx.subgraph(network[1], components[np.where(np.max(sizes) == sizes)[0][0]])
                properties['Diameter'].append((network[0], nx.diameter(subnetwork)))
                properties['Radius'].append((network[0], nx.radius(subnetwork)))
                properties['Average shortest path length'].append((network[0], nx.average_shortest_path_length(subnetwork)))
        else:
            properties['Assortativity'].append(None)
            properties['Connectivity'].append(None)
            properties['Diameter'].append(None)
            properties['Radius'].append(None)
            properties['Average shortest path length'].append(None)
    return properties
e135c4211d924ab9f1af6baec06b8b313a96b11f
3,654,414
def anova_old(
    expression, gene_id, photoperiod_set, strain_set, time_point_set, num_replicates
):
    """One-way analysis of variance (ANOVA) using F-test."""
    num_groups = len(photoperiod_set) * len(strain_set) * len(time_point_set)
    group_size = num_replicates
    total_expression = 0

    # First scan: calculate overall average.
    for pp in photoperiod_set:
        for ss in strain_set:
            for tt in time_point_set:
                total_expression += sum(expression[(gene_id, pp, ss, tt)])
    overall_avg = total_expression / num_groups / group_size

    # Second scan: calculate variances.
    in_group_var = 0
    bt_group_var = 0
    for pp in photoperiod_set:
        for ss in strain_set:
            for tt in time_point_set:
                group = expression[(gene_id, pp, ss, tt)]
                group_avg = sum(group) / group_size
                in_group_var += group_size * (group_avg - overall_avg) ** 2
                for element in group:
                    bt_group_var += (element - group_avg) ** 2
    dof = (num_groups - 1, group_size * num_groups - num_groups)
    f_stat = bt_group_var / dof[0] / in_group_var * dof[1]
    return f_stat, dof
f809e0e2be877e1a0f21ca1e05a7079db80254a1
3,654,415
import struct def _make_ext_reader(ext_bits, ext_mask): """Helper for Stroke and ControlPoint parsing. Returns: - function reader(file) -> list<extension values> - function writer(file, values) - dict mapping extension_name -> extension_index """ # Make struct packing strings from the extension details infos = [] while ext_mask: bit = ext_mask & ~(ext_mask-1) ext_mask = ext_mask ^ bit try: info = ext_bits[bit] except KeyError: info = ext_bits['unknown'](bit) infos.append(info) print(infos) if len(infos) == 0: print("[_make_ext_reader lambda] f:", f) return (lambda f: [], lambda f, vs: None, {}) fmt = '<' + ''.join(info[1] for info in infos) names = [info[0] for info in infos] if '@' in fmt: # struct.unpack isn't general enough to do the job fmts = ['<'+info[1] for info in infos] def reader(f, fmts=fmts): print("[_make_ext_reader reader 1] f:", f, "fmt:", fmt) values = [None] * len(fmts) for i,fmt in enumerate(fmts): if fmt == '<@': nbytes, = struct.unpack('<I', f.read(4)) values[i] = f.read(nbytes) else: values[i], = struct.unpack(fmt, f.read(4)) else: def reader(f, fmt=fmt, nbytes=len(infos)*4): print("[_make_ext_reader reader 2] f:", f, "fmt:", fmt, "nbytes:", nbytes) values = list(struct.unpack(fmt, f.read(nbytes))) print("values", values) return values def writer(f, values, fmt=fmt): print("[_make_ext_reader writer] f:", f, "values:", values, "fmt:", fmt) return f.write(struct.pack(fmt, *values)) lookup = dict( (name,i) for (i,name) in enumerate(names) ) return reader, writer, lookup
2f85ab0f09d5a4cbd2aad7a9819440b610bcf20c
3,654,416
import numpy as np


def resolve_covariant(n_total, covariant=None):
    """Resolves a covariant in the following cases:
        - If a covariant is not provided a diagonal matrix of 1s is generated, and symmetry is
          checked via a comparison with the dataset's transpose
        - If a covariant is provided, the symmetry is checked

    args:
        n_total {int} -- total number of informative features
        covariant {[type]} -- [description] (default: {None})

    returns:
        covariant {np_array}
    """
    if covariant is None:
        print("No covariant provided, generating one.")
        covariant = np.diag(np.ones(n_total))

    # test for symmetry on covariance matrix by comparing the matrix to its transpose
    try:
        assert np.all(covariant == covariant.T)
    except AssertionError:
        print("Assertion error - please check covariance matrix is symmetric.")

    return covariant
cd32136786d36e88204574a739006239312bb99e
3,654,417
from typing import Optional from typing import Union def create_generic_constant( type_spec: Optional[computation_types.Type], scalar_value: Union[int, float]) -> building_blocks.ComputationBuildingBlock: """Creates constant for a combination of federated, tuple and tensor types. Args: type_spec: A `computation_types.Type` containing only federated, tuple or tensor types, or `None` to use to construct a generic constant. scalar_value: The scalar value we wish this constant to have. Returns: Instance of `building_blocks.ComputationBuildingBlock` representing `scalar_value` packed into `type_spec`. Raises: TypeError: If types don't match their specification in the args section. Notice validation of consistency of `type_spec` with `scalar_value` is not the rsponsibility of this function. """ if type_spec is None: return create_tensorflow_constant(type_spec, scalar_value) py_typecheck.check_type(type_spec, computation_types.Type) inferred_scalar_value_type = type_conversions.infer_type(scalar_value) if (not inferred_scalar_value_type.is_tensor() or inferred_scalar_value_type.shape != tf.TensorShape(())): raise TypeError( 'Must pass a scalar value to `create_generic_constant`; encountered a ' 'value {}'.format(scalar_value)) if not type_analysis.contains_only( type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()): raise TypeError if type_analysis.contains_only(type_spec, lambda t: t.is_struct() or t.is_tensor()): return create_tensorflow_constant(type_spec, scalar_value) elif type_spec.is_federated(): unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value) if type_spec.placement == placements.CLIENTS: placement_federated_type = computation_types.FederatedType( type_spec.member, type_spec.placement, all_equal=True) placement_fn_type = computation_types.FunctionType( type_spec.member, placement_federated_type) placement_function = building_blocks.Intrinsic( intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type) elif type_spec.placement == placements.SERVER: placement_federated_type = computation_types.FederatedType( type_spec.member, type_spec.placement, all_equal=True) placement_fn_type = computation_types.FunctionType( type_spec.member, placement_federated_type) placement_function = building_blocks.Intrinsic( intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type) return building_blocks.Call(placement_function, unplaced_zero) elif type_spec.is_struct(): elements = [] for k in range(len(type_spec)): elements.append(create_generic_constant(type_spec[k], scalar_value)) names = [name for name, _ in structure.iter_elements(type_spec)] packed_elements = building_blocks.Struct(elements) named_tuple = create_named_tuple(packed_elements, names, type_spec.python_container) return named_tuple else: raise ValueError( 'The type_spec {} has slipped through all our ' 'generic constant cases, and failed to raise.'.format(type_spec))
e440ef6470eacd66fc51210f288c3bf3c14486c6
3,654,418
def all_same(lst: list) -> bool:
    """test if all list entries are the same"""
    return lst[1:] == lst[:-1]
4ef42fc65d64bc76ab1f56d6e03def4cb61cf6f0
3,654,419
def binary_find(N, x, array):
    """
    Binary search
    :param N: size of the array
    :param x: value
    :param array: array
    :return: position where it is found. -1 if it is not found
    """
    lower = 0
    upper = N
    while (lower + 1) < upper:
        mid = int((lower + upper) / 2)
        if x < array[mid]:
            upper = mid
        else:
            lower = mid
    if array[lower] <= x:
        return lower
    return -1
ed6e7cc15de238381dbf65eb6c981676fd0525f5
3,654,420
def _add_data_entity(app_context, entity_type, data):
    """Insert new entity into a given namespace."""
    old_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        new_object = entity_type()
        new_object.data = data
        new_object.put()
        return new_object
    finally:
        namespace_manager.set_namespace(old_namespace)
864e12973ad7cfd4c89fbefb211b8b940913590f
3,654,421
def scalarmat(*q):
    """multiplies every object in q with each object in q. Should return a unity matrix for an orthonormal system"""
    ret = []
    for a in q:
        toa = []
        for b in q:
            toa.append(a * b)
        ret.append(toa)
    return ret
a61c813b548f1934e16517efc4d203c6390097fe
3,654,422
import time


def frames_per_second():
    """ Return the estimated frames per second

    Returns the current estimate for frames-per-second (FPS).
    FPS is estimated by measuring the amount of time that has elapsed since
    this function was previously called. The FPS estimate is low-pass filtered
    to reduce noise.

    This function is intended to be called one time for every iteration of
    the program's main loop.

    Returns
    -------
    fps : float
        Estimated frames-per-second. This value is low-pass filtered
        to reduce noise.
    """
    global _time_prev, _fps
    time_now = time.time() * 1000.0
    dt = time_now - _time_prev
    _time_prev = time_now
    if dt == 0.0:
        return _fps.value
    return _fps.update(1000.0 / dt)
0ac78e052d1e3f4d09a332bd71df041f14a46111
3,654,423
def modularity(partition, graph, weight='weight'):
    """Compute the modularity of a partition of a graph

    Parameters
    ----------
    partition : dict
        the partition of the nodes, i.e a dictionary where keys are their nodes
        and values the communities
    graph : networkx.Graph
        the networkx graph which is decomposed
    weight : str, optional
        the key in graph to use as weight. Default to 'weight'

    Returns
    -------
    modularity : float
        The modularity

    Raises
    ------
    KeyError
        If the partition is not a partition of all graph nodes
    ValueError
        If the graph has no link
    TypeError
        If graph is not a networkx.Graph

    References
    ----------
    .. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community
       structure in networks. Physical Review E 69, 26113(2004).

    Examples
    --------
    >>> G=nx.erdos_renyi_graph(100, 0.01)
    >>> part = best_partition(G)
    >>> modularity(part, G)
    """
    if type(graph) != nx.Graph:
        raise TypeError("Bad graph type, use only non directed graph")

    inc = dict([])
    deg = dict([])
    links = graph.size(weight=weight)
    if links == 0:
        raise ValueError("A graph without link has an undefined modularity")

    for node in graph:
        com = partition[node]
        deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)
        for neighbor, datas in graph[node].items():
            edge_weight = datas.get(weight, 1)
            if partition[neighbor] == com:
                if neighbor == node:
                    inc[com] = inc.get(com, 0.) + float(edge_weight)
                else:
                    inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.

    res = 0.
    for com in set(partition.values()):
        res += inc.get(com, 0.) - \
               ((deg.get(com, 0.) ** 2) / (4. * links))
    return (1.0 / links) * res
371c3f5e362114896bf0559efe452d79af6e79f8
3,654,424
def config_lst_bin_files(data_files, dlst=None, atol=1e-10, lst_start=0.0, fixed_lst_start=False, verbose=True, ntimes_per_file=60): """ Configure lst grid, starting LST and output files given input data files and LSTbin params. Parameters ---------- data_files : type=list of lists: nested set of lists, with each nested list containing paths to miriad files from a particular night. These files should be sorted by ascending Julian Date. Frequency axis of each file must be identical. dlst : type=float, LST bin width. If None, will get this from the first file in data_files. lst_start : type=float, starting LST for binner as it sweeps from lst_start to lst_start + 2pi. fixed_lst_start : type=bool, if True, LST grid starts at lst_start, regardless of LST of first data record. Otherwise, LST grid starts at LST of first data record. ntimes_per_file : type=int, number of LST bins in a single output file Returns (lst_grid, dlst, file_lsts, start_lst) ------- lst_grid : float ndarray holding LST bin centers dlst : float, LST bin width of output lst_grid file_lsts : list, contains the lst grid of each output file start_lst : float, starting lst for LST binner """ # get dlst from first data file if None if dlst is None: start, stop, int_time = utils.get_miriad_times(data_files[0][0]) dlst = int_time # get start and stop times for each list of files in data_files. # add_int_buffer adds an integration to the end time of df[:-1] files, # and the %(2pi) ensures everything is within a 2pi LST grid. data_times = [] for df in data_files: data_times.append(np.array(utils.get_miriad_times(df, add_int_buffer=True))[:2, :].T % (2 * np.pi)) # unwrap data_times less than lst_start, get starting and ending lst start_lst = 100 end_lst = -1 for dt in data_times: # unwrap starts below lst_start dt[:, 0][dt[:, 0] < lst_start - atol] += 2 * np.pi # unwrap ends below starts dt[:, 1][dt[:, 1] < dt[:, 0] - atol] += 2 * np.pi # get start and end lst start_lst = np.min(np.append(start_lst, dt[:, 0])) end_lst = np.max(np.append(end_lst, dt.ravel())) # ensure start_lst isn't beyond 2pi if start_lst >= (2 * np.pi): start_lst -= 2 * np.pi end_lst -= 2 * np.pi for dt in data_times: dt -= 2 * np.pi # create lst_grid if fixed_lst_start: start_lst = lst_start lst_grid = make_lst_grid(dlst, lst_start=start_lst, verbose=verbose) dlst = np.median(np.diff(lst_grid)) # get starting and stopping lst_grid indices start_diff = lst_grid - start_lst start_diff[start_diff < -dlst / 2 - atol] = 100 start_index = np.argmin(start_diff) end_diff = lst_grid - end_lst end_diff[end_diff > dlst / 2 + atol] = -100 end_index = np.argmax(end_diff) # get number of output files nfiles = int(np.ceil(float(end_index - start_index) / ntimes_per_file)) # get output file lsts file_lsts = [lst_grid[start_index:end_index][ntimes_per_file * i:ntimes_per_file * (i + 1)] for i in range(nfiles)] return data_times, lst_grid, dlst, file_lsts, start_lst
b91cd59bf8d9693bb255c10ef9fb5ce3ef219a41
3,654,425
import numpy as np


def get_str_arr_info(val):
    """ Find type of string in array val, and also the min and max length.
    Return None if val does not contain strings."""
    fval = np.array(val).flatten()
    num_el = len(fval)
    max_length = 0
    total_length = 0
    for sval in fval:
        len_sval = len(sval)
        if len_sval > max_length:
            max_length = len_sval
        total_length += len_sval
    return (num_el, max_length, total_length)
283233c780379ca637f621510fa09c359ff53784
3,654,426
from typing import Callable
from typing import Any


def wrap(
    module: nn.Module,
    cls: Callable = FullyShardedDataParallel,
    activation_checkpoint: bool = False,
    **wrap_overrides: Any
) -> nn.Module:
    """
    Annotate that a module should be wrapped. Annotated modules will only be
    wrapped if inside of an :func:`enable_wrap` context manager.

    An important use case is annotating large layers that should be sharded
    (in-place) during initialization, to avoid running out of system memory.

    Usage::

        with enable_wrap(**params):
            # Wraps layer in FSDP by default if within context
            self.l1 = wrap(torch.nn.Linear(5, 5))

    Args:
        module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
        cls (Callable): class wrapper to wrap the model with if in context
            (default: :class:`FullyShardedDataParallel`)
        activation_checkpoint (bool): use activation checkpointing wrapper
            (default: False)
        **wrap_overrides: configuration overrides that will take priority over
            the values provided by the :func:`enable_wrap` context
    """
    if ConfigAutoWrap.in_autowrap_context:
        wrap_overrides = {**ConfigAutoWrap.kwargs, **wrap_overrides}
        if activation_checkpoint:
            module = checkpoint_wrapper(module)
        return cls(module, **wrap_overrides)
    return module
cdf313b9100ee2a2f3a9d3ed47fafa76dea16b74
3,654,429
def _is_multiple_state(state_size):
    """Check whether the state_size contains multiple states."""
    return (hasattr(state_size, '__len__') and
            not isinstance(state_size, tensor_shape.TensorShape))
f034b2a4656edf72be515d99093efc3b03591af0
3,654,430
def deque_to_yaml(representer, node):
    """Convert collections.deque to YAML"""
    return representer.represent_sequence("!collections.deque",
                                          (list(node), node.maxlen))
5ff503b4f21af58cf96d26171e078ddd5d754141
3,654,431
import urllib2  # Python 2 module; the function below calls urllib2.urlopen

from bs4 import BeautifulSoup


def parse_webpage(url, page_no):
    """
    Parses the given webpage using 'BeautifulSoup' and returns
    html content of that webpage.
    """
    page = urllib2.urlopen(url + page_no)
    parsed_page = BeautifulSoup(page, 'html.parser')
    return parsed_page
774046c85cc38f3575cabc473c93b92b6dbc3d25
3,654,432
import random
from string import digits  # assuming the standard string.digits constant


def randomDigits(length=8):
    """
    Generate a random digit string.
    randomDigits() ==> 73048139
    """
    return ''.join([random.choice(digits) for _ in range(length)])
cb4200ea4d6850888461880bc3d9cc0ea6804993
3,654,433
from typing import Callable


def some_func(string: str, function: Callable) -> bool:
    """Check if some elements in a string match the function (functional).

    Args:
        string: <str> string to verify.
        function: <callable> function to call.

    Returns:
        True if some of the elements in the sequence are True.

    Examples:
        >>> assert some_func('abcdefg&%$', str.isalpha)
        >>> assert not some_func('&%$=', str.isalpha)
    """
    return any(map(function, string)) and not all(map(function, string))
e67af6613975a6757905087397ff8b68e83ddbf6
3,654,435
def UseExceptions(*args):
    """UseExceptions()"""
    return _ogr.UseExceptions(*args)
71a8e36c0554796298a5e8c9a3e88bf423acef5b
3,654,436
def get_mlm_logits(input_tensor, albert_config, mlm_positions, output_weights):
    """From run_pretraining.py."""
    input_tensor = gather_indexes(input_tensor, mlm_positions)
    with tf.variable_scope("cls/predictions"):
        # We apply one more non-linear transformation before the output layer.
        # This matrix is not used after pre-training.
        with tf.variable_scope("transform"):
            input_tensor = tf.layers.dense(
                input_tensor,
                units=albert_config.embedding_size,
                activation=modeling.get_activation(albert_config.hidden_act),
                kernel_initializer=modeling.create_initializer(
                    albert_config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        output_bias = tf.get_variable(
            "output_bias",
            shape=[albert_config.vocab_size],
            initializer=tf.zeros_initializer())
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
    return logits
36a2f10fe33aea371fcbf23ac856bf910998e1c9
3,654,437
def spm_hrf(TR, t1=6, t2=16, d1=1, d2=1, ratio=6, onset=0, kernel=32): """Python implementation of spm_hrf.m from the SPM software. Parameters ---------- TR : float Repetition time at which to generate the HRF (in seconds). t1 : float (default=6) Delay of response relative to onset (in seconds). t2 : float (default=16) Delay of undershoot relative to onset (in seconds). d1 : float (default=1) Dispersion of response. d2 : float (default=1) Dispersion of undershoot. ratio : float (default=6) Ratio of response to undershoot. onset : float (default=0) Onset of hemodynamic response (in seconds). kernel : float (default=32) Length of kernel (in seconds). Returns ------- hrf : array Hemodynamic repsonse function References ---------- [1] Adapted from the Poldrack lab fMRI tools. https://github.com/poldracklab/poldracklab-base/blob/master/fmri/spm_hrf.py """ ## Define metadata. fMRI_T = 16.0 TR = float(TR) ## Define times. dt = TR/fMRI_T u = np.arange(kernel/dt + 1) - onset/dt ## Generate (super-sampled) HRF. hrf = gamma(t1/d1,scale=1.0/(dt/d1)).pdf(u) - gamma(t2/d2,scale=1.0/(dt/d2)).pdf(u)/ratio ## Downsample. good_pts=np.array(range(np.int(kernel/TR)))*fMRI_T hrf=hrf[good_pts.astype(int)] ## Normalize and return. hrf = hrf/np.sum(hrf) return hrf
be07acb0980000a59f4df39f0ab7147dbb5d258e
3,654,438
import numpy as np


def prob_active_neuron(activity_matrix):
    """Get expected co-occurrence under independence assumption.

    Parameters
    ----------
    activity_matrix : np.array
        num_neurons by num_bins, boolean (1 or 0)

    Returns
    -------
    prob_active : np.array
        Fraction of bins each cell participates in individually
    """
    prob_active = np.mean(activity_matrix, axis=1)
    return prob_active
fd5eb513598d840602117adb0223c75b71660f8a
3,654,439
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
    """Equivalent of PIL Translate in X dimension."""
    image = translate(wrap(image), [-pixels, 0])
    return unwrap(image, replace)
53ea2bf905487a310d6271b37adef0523bcdf4de
3,654,440
def reduce_time_space_seasonal_regional( mv, season=seasonsyr, region=None, vid=None, exclude_axes=[] ): """Reduces the variable mv in all time and space dimensions. Any other dimensions will remain. The averages will be restricted to the the specified season and region. The season should be a cdutil.times.Seasons object. The region may be a string defining a region in defines.py, or it may be a list of four numbers as in defines.py. That is, it would take the form [latmin,latmax,lonmin,lonmax]. """ #if len( set(['time','lat','lon','lev']) & set([ax.id for ax in allAxes(mv)]) )==0: if len( [ax for ax in allAxes(mv) if ax.isTime() or ax.isLatitude() or ax.isLongitude() or ax.isLevel() ] )==0: return mv # nothing to reduce if vid is None: vid = 'reduced_'+mv.id mvreg = select_region(mv, region) axes = allAxes( mvreg ) #axis_names = [ a.id for a in axes if a.id=='lat' or a.id=='lon' or a.id=='lev'] axis_names = [ a.id for a in axes if a.isLatitude() or a.isLongitude() or a.isLevel() and a.id not in exclude_axes] axes_string = '('+')('.join(axis_names)+')' if len(axes_string)>2: for axis in axes: if axis.getBounds() is None and not (axis.isTime() and hasattr(axis,'climatology')): axis._bounds_ = axis.genGenericBounds() mvsav = cdutil.averager( mvreg, axis=axes_string ) mvtsav = calculate_seasonal_climatology(mvsav, season) mvtsav.id = vid #mvtsav = delete_singleton_axis(mvtsav, vid='time') #mvtsav = delete_singleton_axis(mvtsav, vid='lev') #mvtsav = delete_singleton_axis(mvtsav, vid='lat') #mvtsav = delete_singleton_axis(mvtsav, vid='lon') return mvtsav
ec2005564ccaca881e2737cb8f51f05ba091e64d
3,654,441
import math


def fuel_requirement(mass: int) -> int:
    """Fuel is mass divided by three, rounded down, minus 2"""
    return math.floor(mass / 3) - 2
5899d9260fe7e353c3a1d882f624257d5009248d
3,654,444
import pandas as pd


def data_head(fname):
    """ Get the column names of the csv

    Parameters
    ----------
    fname: str
        Filename of the csv-data

    Returns
    ----------
    str-list: header-names of the csv-data
    """
    return pd.read_csv(fname, encoding='ISO-8859-1').columns
2b10f0465b30371560a5bc009a2d3a945a80f493
3,654,445
def format(serverDict, sortKeyword='id'):
    """
    Returns an array of nicely formatted servers, sorted by whatever the user prefers, or id by default.
    """
    sortDict = {'id': lambda server: int(server.name[4:-3]),
                'uptime': lambda server: server.uptime}
    sortFunction = sortDict[sortKeyword]

    class Server:
        def __init__(self, serverName, dataSet):
            self.name = str(serverName)
            self.loadAvgs = dataSet[serverName]['load_avgs']
            self.users = dataSet[serverName]['users']
            self.uptime = dataSet[serverName]['uptime']

        def __str__(self):
            return str(self.name[:-3]) + " (" + str(self.loadAvgs[1] * 100) + "% mean CPU load, " + \
                str(len(self.users)) + " users online, up for " + cleanTime(self.uptime) + ")"

    serverList = []
    for server in serverDict:
        serverList.append(Server(server, serverDict))

    # Now, sort the list based on the sorting function
    serverList.sort(key=sortFunction)
    return serverList
67058d6c0dd6c64a2540be371fa7ba24d081d273
3,654,446
def moray_script():
    """Generate the js module that exposes JavaScript functions.

    Returns:
        The js module that exposes JavaScript functions
    """
    return bottle.static_file('moray.js', root=_root_static_module)
35eebb14902513a2a0e12bf8ce866a8c6d00e193
3,654,447
def load_compdat(wells, buffer, meta, **kwargs): """Load COMPDAT table.""" _ = kwargs dates = meta['DATES'] columns = ['DATE', 'WELL', 'I', 'J', 'K1', 'K2', 'MODE', 'Sat', 'CF', 'DIAM', 'KH', 'SKIN', 'ND', 'DIR', 'Ro'] df = pd.DataFrame(columns=columns) for line in buffer: if '/' not in line: break line = line.split('/')[0].strip() if not line: break vals = line.split() full = [None] * len(columns) full[0] = dates[-1] if not dates.empty else pd.to_datetime('') shift = 1 for i, v in enumerate(vals): if '*' in v: shift += int(v.strip('*')) - 1 else: full[i+shift] = v df = df.append(dict(zip(columns, full)), ignore_index=True) df[['WELL', 'MODE', 'DIR']] = df[['WELL', 'MODE', 'DIR']].applymap( lambda x: x.strip('\'\"') if x is not None else x) df[['I', 'J', 'K1', 'K2']] = df[['I', 'J', 'K1', 'K2']].astype(int) df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']] = df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']].astype(float) for k, v in DEFAULTS.items(): if k in df: df[k] = df[k].fillna(v) if not df.empty: welldata = {k: {'COMPDAT': v.reset_index(drop=True)} for k, v in df.groupby('WELL')} wells.update(welldata, mode='a', ignore_index=True) return wells
fb28b82ba6ad36c3aea45e31c684c9302cdf511c
3,654,448
from typing import Optional def scale_random(a: float, b: float, loc: Optional[float] = None, scale: Optional[float] = None) -> float: """Returns a value from a standard normal truncated to [a, b] with mean loc and standard deviation scale.""" return _default.scale_random(a, b, loc=loc, scale=scale)
3c336cd3c345f0366bd721ff2a3a426853804721
3,654,449
def created_link(dotfile: ResolvedDotfile) -> str:
    """An output line for a newly-created link."""
    return (
        co.BOLD
        + co.BRGREEN
        + OK
        + " "
        + ln(dotfile.installed.disp, dotfile.link_dest)
        + co.RESET
    )
9195db9c3ea8f7aa6281017ef62967ef5b07f4f3
3,654,450
def instruction2_task(scr):
    """ Description of task 1 """
    scr.draw_text(text="Great Work!! " +
                  "\n\nNow comes your TASK 3: **Consider an image**." +
                  "\n\nIf you press the spacebar now, an image will " +
                  "appear at the bottom of the screen. You can use the information from the" +
                  " image to make any modifications to the translation of the sentence." +
                  "\n\n***However in certain cases, the image is not related to the sentence " +
                  "or not present at all.***" +
                  "\n\nAfter looking at the image, say loudly if you'd like to modify your translation" +
                  " by saying " +
                  "\"I'd like to modify my translation.\" or \"I'd keep the same translation\"" +
                  "\nif you would like to stick with your translation." +
                  "\n\nThe final TASK 4 is to **Say the translation again (modified or not)**." +
                  "\nPlease press the spacebar to indicate the start of your new translation.\nYou can stop your" +
                  " recording by pressing the spacebar and moving to the next sentence.",
                  fontsize=25)
    return scr
554191b520e1229ffc076bbed1c57f265e0c0964
3,654,451
def recursive_subs(e: sp.Basic, replacements: list[tuple[sp.Symbol, sp.Basic]]) -> sp.Basic:
    """
    Substitute the expressions in ``replacements`` recursively.

    This might not be necessary in all cases, Sympy's builtin ``subs()``
    method should also do this recursively.

    .. note::

        The order of the tuples in ``replacements`` might matter,
        make sure to order these sensibly in case the expression contains
        a lot of nested substitutions.

    Parameters
    ----------
    e : sp.Basic
        Input expression
    replacements : list[tuple[sp.Symbol, sp.Basic]]
        List of replacements: ``symbol, replace``

    Returns
    -------
    sp.Basic
        Substituted expression
    """
    for _ in range(0, len(replacements) + 1):
        new_e = e.subs(replacements)
        if new_e == e:
            return new_e
        else:
            e = new_e
    return new_e
013a203d214eb7c683efdefc2bc0b60781260576
3,654,454
def create_lag_i(df, time_col, colnames, lag):
    """ the table should be indexed by i,year """
    # prepare names
    if lag > 0:
        s = "_l" + str(lag)
    else:
        s = "_f" + str(-lag)
    values = [n + s for n in colnames]
    rename = dict(zip(colnames, values))

    # create lags
    dlag = df.reset_index() \
             .assign(t=lambda d: d[time_col] + lag) \
             .rename(columns=rename)[['i', time_col] + values] \
             .set_index(['i', time_col])

    # join and return
    return df.join(dlag)
be6d4b390ae66cd83320b2c341ba3c76cfad2bdb
3,654,455
def crop_image(image_array, point, size):
    """
    Cropping the image into the assigned size
    image_array: numpy array of image
    size: desirable cropped size
    return -> cropped image array
    """
    img_height, img_width = point  # assigned location in crop
    # for color image
    if len(image_array.shape) == 3:
        image_array = image_array[:, img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    # for gray image
    elif len(image_array.shape) == 2:
        image_array = image_array[img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    return image_array
8ee684719e3e4fea755466e810c645c1ccf7d7f5
3,654,456
from math import pi


def deg_to_rad(deg):
    """Convert degrees to radians."""
    return deg * pi / 180.0
e07bfcb4a541bddedeb8e9a03d6033b48d65c856
3,654,457
import numpy as np


def find_plane_normal(points):
    """
    d - number of dimensions
    n - number of points

    :param points: `d x n` array of points
    :return: normal vector of the best-fit plane through the points
    """
    mean = np.mean(points, axis=1)
    zero_centre = (points.T - mean.T).T
    U, s, VT = np.linalg.svd(zero_centre)
    normal = U[:, -1]
    return normal
3edd4a848b50cffe9a78c6f75999c79934fd5003
3,654,458
def binary_search(data, target, low, high):
    """Return True if target is found in indicated portion of a Python list.

    The search only considers the portion from data[low] to data[high] inclusive.
    """
    if low > high:
        return False                 # interval is empty; no match
    else:
        mid = (low + high) // 2
        if target == data[mid]:      # found a match
            return True
        elif target < data[mid]:
            # recur on the portion left of the middle
            return binary_search(data, target, low, mid - 1)
        else:
            # recur on the portion right of the middle
            return binary_search(data, target, mid + 1, high)
4395434aea4862e7fc0cab83867f32955b8fb2a2
3,654,459
import time def ReadUnifiedTreeandHaloCatalog(fname, desiredfields=[], icombinedfile=1,iverbose=1): """ Read Unified Tree and halo catalog from HDF file with base filename fname. Parameters ---------- Returns ------- """ if (icombinedfile): hdffile=h5py.File(fname,'r') #load data sets containing number of snaps headergrpname="Header/" numsnaps=hdffile[headergrpname].attrs["NSnaps"] #allocate memory halodata=[dict() for i in range(numsnaps)] numhalos=[0 for i in range(numsnaps)] atime=[0 for i in range(numsnaps)] tree=[[] for i in range(numsnaps)] cosmodata=dict() unitdata=dict() #load cosmology data cosmogrpname="Cosmology/" fieldnames=[str(n) for n in hdffile[headergrpname+cosmogrpname].attrs.keys()] for fieldname in fieldnames: cosmodata[fieldname]=hdffile[headergrpname+cosmogrpname].attrs[fieldname] #load unit data unitgrpname="Units/" fieldnames=[str(n) for n in hdffile[headergrpname+unitgrpname].attrs.keys()] for fieldname in fieldnames: unitdata[fieldname]=hdffile[headergrpname+unitgrpname].attrs[fieldname] #for each snap load the appropriate group start=time.clock() for i in range(numsnaps): snapgrpname="Snap_%03d/"%(numsnaps-1-i) if (iverbose==1): print("Reading ",snapgrpname) isnap=hdffile[snapgrpname].attrs["Snapnum"] atime[isnap]=hdffile[snapgrpname].attrs["scalefactor"] numhalos[isnap]=hdffile[snapgrpname].attrs["NHalos"] if (len(desiredfields)>0): fieldnames=desiredfields else: fieldnames=[str(n) for n in hdffile[snapgrpname].keys()] for catvalue in fieldnames: halodata[isnap][catvalue]=np.array(hdffile[snapgrpname+catvalue]) hdffile.close() print("read halo data ",time.clock()-start) else : hdffile=h5py.File(fname+".snap_000.hdf.data",'r') numsnaps=int(hdffile["NSnaps"][0]) #get field names fieldnames=[str(n) for n in hdffile.keys()] #clean of header info fieldnames.remove("Snapnum") fieldnames.remove("NSnaps") fieldnames.remove("NHalos") fieldnames.remove("TotalNHalos") fieldnames.remove("scalefactor") if (len(desiredfields)>0): fieldnames=desiredfields hdffile.close() halodata=[[] for i in range(numsnaps)] numhalos=[0 for i in range(numsnaps)] atime=[0 for i in range(numsnaps)] tree=[[] for i in range(numsnaps)] start=time.clock() for i in range(numsnaps): hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'r') atime[i]=(hdffile["scalefactor"])[0] numhalos[i]=(hdffile["NHalos"])[0] halodata[i]=dict() for catvalue in fieldnames: halodata[i][catvalue]=np.array(hdffile[catvalue]) hdffile.close() print("read halo data ",time.clock()-start) #lets ignore the tree file for now for i in range(numsnaps): tree[i]=dict() return atime,tree,numhalos,halodata,cosmodata,unitdata if (icombinedfile==1): hdffile=h5py.File(fname+".tree.hdf.data",'r') treefields=["haloID", "Num_progen"] #do be completed for Progenitor list although information is contained in the halo catalog by searching for things with the same head #treefields=["haloID", "Num_progen", "Progen"] for i in range(numsnaps): snapgrpname="Snap_%03d/"%(numsnaps-1-i) tree[i]=dict() for catvalue in treefields: """ if (catvalue==treefields[-1]): tree[i][catvalue]=[[]for j in range(numhalos[i])] for j in range(numhalos[i]): halogrpname=snapgrpname+"/Halo"+str(j) tree[i][catvalue]=np.array(hdffile[halogrpname+catvalue]) else: tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue]) """ tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue]) hdffile.close() return atime,tree,numhalos,halodata,cosmodata,unitdata
7efc107d5b6eb8a9747d09108f0e89c0b25bb253
3,654,460
import re


def lines_in_pull(pull):
    """Return a line count for the pull request.

    To consider both added and deleted, we add them together, but discount the
    deleted count, on the theory that adding a line is harder than deleting a
    line (*waves hands very broadly*).
    """
    ignore = r"(/vendor/)|(conf/locale)|(static/fonts)|(test/data/uploads)"
    lines = 0
    files = pull.get_files()
    for f in files:
        if re.search(ignore, f.filename):
            # print("Ignoring file {}".format(f.filename))
            continue
        lines += f.additions + f.deletions // 5
    if pull.combinedstate == "merged" and lines > 2000:
        print("*** Large pull: {lines:-6d} lines, {pr.created_at} {pr.number:-4d}: {pr.title}".format(lines=lines, pr=pull))
    return lines
24aabd83c24c3f337f07b50c894f5503eadfc252
3,654,461
def get_active_milestones(session, project):
    """Returns the list of all the active milestones for a given project."""
    query = (
        session.query(model.Issue.milestone)
        .filter(model.Issue.project_id == project.id)
        .filter(model.Issue.status == "Open")
        .filter(model.Issue.milestone.isnot(None))
    )
    return sorted([item[0] for item in query.distinct()])
8a4c23ada7b18796ea76c770033320f29c0e8d5d
3,654,463
def set_camera_parameters(cfg):
    """
    Set camera parameters. All values come from the dict generated from the JSON file.

    :param cfg: JSON instance.
    :type cfg: dict
    :return: configured PiCamera instance.
    :rtype: PiCamera
    """
    # set camera resolution [width x height]
    camera = PiCamera()
    camera.resolution = cfg["stream"]["resolution"]
    # set camera frame rate [Hz]
    camera.framerate = cfg["stream"]["framerate"]
    # exposure mode
    camera.exposure_mode = cfg["exposure"]["mode"]
    if cfg["exposure"]["set_iso"]:
        camera.iso = cfg["exposure"]["iso"]
    return camera
3bd7b0b410d7a19f486a8e3fc80d50af4caa1734
3,654,464
def get_srl_result_for_instance(srl_dict, instance):
    """Get SRL output for an instance."""
    sent_id = instance.sent_id
    tokens_gold = instance.tokens
    srl_output = srl_dict[sent_id]
    srl_output["words"] = [word for word in srl_output["words"] if word != "\\"]
    tokens_srl = srl_output['words']
    if tokens_srl != tokens_gold:
        srl2gold_id_map = get_gold_map(tokens_srl, tokens_gold)
    else:
        srl2gold_id_map = {i: i for i in range(len(tokens_srl))}
    return srl_output, srl2gold_id_map
4437e68817469966d70759bf038b68c6b5983745
3,654,465
from typing import List


def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
    """Break text in to equal (cell) length strings."""
    _get_character_cell_size = get_character_cell_size
    characters = [
        (character, _get_character_cell_size(character)) for character in text
    ][::-1]
    total_size = position
    lines: List[List[str]] = [[]]
    append = lines[-1].append
    pop = characters.pop
    while characters:
        character, size = pop()
        if total_size + size > max_size:
            lines.append([character])
            append = lines[-1].append
            total_size = size
        else:
            total_size += size
            append(character)
    return ["".join(line) for line in lines]
d8d0bd558b48a43775aed3cb5e15a3889fdc653d
3,654,467
def read_input_field_lonlat( input_file, fld_name, level, conf_in, *, conv_fact=None, crop=0, ): """Read from file and pre-process a field. Returns the field as a Field2D object. Arguments: - input_file: Input netCDF file. - fld_name: Name of the input field used in the input file. - conf_in: Input configuration. Optional arguments: - conv_fact: Conversion factor applied to the field. - crop: cut N pixels off around the domain """ lon, lat = read_lonlat2d( infile=conf_in["infile_lonlat"], name_lon=conf_in["lonlat_names"][0], name_lat=conf_in["lonlat_names"][1], transpose2d=conf_in["infield_transpose"], reduce_grid_res=conf_in["reduce_grid_resolution"], reduce_grid_stride=conf_in["reduce_grid_stride"], ) # Read the raw field from file try: with nc4.Dataset(input_file, "r") as fi: # Strip leading time dimension fld_raw = fi[fld_name][0].astype(np.float32) except Exception as e: err = "Cannot read '{}' from {}\n{}: {}".format( fld_name, input_file, e.__class__.__name__, str(e).strip() ) raise IOError(err) if conf_in["infield_transpose"]: fld_raw = fld_raw.T # SR_TMP < assert lon.shape == fld_raw.shape # SR_TMP > # Shrink domain if crop is not None and crop > 0: fld_raw = fld_raw[crop:-crop, crop:-crop] lon = lon[crop:-crop, crop:-crop] lat = lat[crop:-crop, crop:-crop] # Select level if level is not None: fld_raw = fld_raw[level, :, :] # Apply a conversion factor if conv_fact is not None: fld_raw *= conv_fact # Create a Field2D object fld = Field2D(fld_raw, lon, lat) return fld
2ad31cee8ea26abcb7982fc4f5a9518dd11872c4
3,654,468
def multiply_scenarios(sep, *args):
    """
    Create the cross product of two lists of scenarios
    """
    result = None
    for scenes in args:
        if result is None:
            result = scenes
        else:
            total = []
            for scena in result:
                for scenb in scenes:
                    # Create a merged scenario with a concatenated name
                    name = scena[0] + sep + scenb[0]
                    tdict = {}
                    tdict.update(scena[1])
                    tdict.update(scenb[1])
                    # If there is a 'P' value, it represents the
                    # probability that we want to use this scenario.
                    # If both scenarios list a probability, multiply them.
                    if 'P' in scena[1] and 'P' in scenb[1]:
                        P = scena[1]['P'] * scenb[1]['P']
                        tdict['P'] = P
                    total.append((name, tdict))
            result = total
    return check_scenarios(result)
ef44d9cfcd01304be2d56215caea676dfc26d01b
3,654,469
def export_output():
    """
    Returns a function that will return the contents of the first file in a zip file
    which is not named '_metadata.csv'
    """
    def fn(export: FlexibleDataExport):
        out = BytesIO()
        export.file_format = FileFormat.ZIP_CSV
        export.write_data(out)
        with ZipFile(out, 'r') as zipfile:
            names = [name for name in zipfile.namelist() if name != '_metadata.csv']
            with zipfile.open(names[0], 'r') as infile:
                return infile.read().decode('utf-8')

    yield fn
dd94d996e72d01c287d8a1b57979d47b89e6a207
3,654,470
def compute_total_probability_vector(mix_coeff_matrix, kernel_probability_matrix):
    """
    Computes the total, weighted probability vector using the mixture coefficient matrix
    and the kernel probability matrix.
    """
    # The computation for the total probability vector can be written in one line!
    total_probability_vector = K.sum(mix_coeff_matrix * kernel_probability_matrix,
                                     axis=1, keepdims=True)
    return total_probability_vector
9c9d97dd8d7c83be02bb91a9924994c36700cbd8
3,654,471
import numpy as np


def mnist_noniid(dataset, num_users):
    """
    Sample non-I.I.D client data from MNIST dataset
    :param dataset:
    :param num_users:
    :return:
    """
    num_shards, num_imgs = 200, 300
    idx_shard = [i for i in range(num_shards)]
    dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
    idxs = np.arange(num_shards * num_imgs)
    labels = dataset.train_labels.numpy()

    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]

    # divide and assign
    for i in range(num_users):
        rand_set = set(np.random.choice(idx_shard, 2, replace=False))
        idx_shard = list(set(idx_shard) - rand_set)
        for rand in rand_set:
            dict_users[i] = np.concatenate(
                (dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
    return dict_users
8194cf27698d9e721f739ed405f56c8fddbe581a
3,654,472
import numpy as np


def first_order_model(nt, rates):
    """
    Returns the first-order model asymptotic solution for a network nt.
    Takes a list of interaction weights (in the same order as the list of nodes)
    as the "rates" argument
    """
    if type(nt) == list:
        nt = az.transform(nt)
        M = network_matrix(nt, rates=rates)
    elif type(nt) == np.ndarray:
        M = nt
        nt = None
    else:
        M = network_matrix(nt, rates=rates)
    L, V = np.linalg.eig(M)
    kmax = np.real(L).argmax()
    return (np.real(V[:, kmax]) / np.real(V[:, kmax]).sum(), az.transform(nt))
8348e68568d3fb9f5236a1e7852f2b1cb8c2860d
3,654,473
import requests


def save_to_disk(url, save_path):
    """
    Saves to disk non-destructively (xb option will not overwrite)
    """
    print('Downloading: %s' % url)
    r = requests.get(url)
    if r.status_code == 404:
        print('URL broken, unable to download: %s' % url)
        return False
    else:
        with open(save_path, 'xb') as f:
            f.write(r.content)
        return True
c9917a637026d999765364d3c276150681554129
3,654,474
def render_settings_window(s_called, s_int, ntfc_called, ntfc_state, s_state):
    """ Render the settings window """
    win = Settings(s_called, s_int, ntfc_called, ntfc_state, s_state)
    win.connect("delete-event", Gtk.main_quit)
    win.show_all()
    Gtk.main()
    return win.settings_called, win.interval, win.notifications_called, win.notifications_state
30f5a64b822d408b4f9ca4d83047753fa55eaa58
3,654,476
import json def server(server_id): """ Returns a list of sourcemod servers """ data = {} db_server = ServerModel.select().join(IPModel) db_server = db_server.where(ServerModel.id == server_id).get() server_address = (db_server.ip.address, db_server.port) info = {} try: querier = ServerQuerier(server_address, 1) info = querier.get_info() except NoResponseError: pass players = [] try: players = querier.get_players()["players"] except BrokenMessageError: pass except NoResponseError: pass data["id"] = db_server.id for key in info: data[key] = str(info[key]) data["players"] = [] for player in players: player_data = {} for key in player: if type(player[key]) == str: player_data[key] = player[key].encode('utf8') continue player_data[key] = player[key] data["players"].append(player_data) return json.dumps(data)
a52fd4bbaefefff5e667dd1dc1b06f68b7643810
3,654,477
def atom_hsoc(case, soc): """ Return atomic spin-orbit coupling matrix :math:`\\vec{l}\cdot\\vec{s}` in complex spherical harmonics basis. Parameters ---------- case : str String label indicating atomic shell, - 'p': for :math:`p` -shell. - 't2g': for :math:`t_{2g}` -shell. - 'd': for :math:`d` -shell. - 'f': for :math:`f` -shell. soc : float The strength of spin-orbit coupling. Returns ------- hsoc : 2d complex array The spin-orbit coupling matrix. """ sqrt2 = np.sqrt(2.0) sqrt6 = np.sqrt(6.0) sqrt10 = np.sqrt(10.0) sqrt12 = np.sqrt(12.0) if case.strip() == 'p': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * soc * hsoc elif case.strip() == 't2g': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * -soc * hsoc elif case.strip() == 'd': hsoc = np.zeros((10, 10), dtype=np.complex128) hsoc[0,0] = -2.0 hsoc[3,0] = 2.0 hsoc[1,1] = 2.0 hsoc[2,2] = -1.0 hsoc[5,2] = sqrt6 hsoc[0,3] = 2.0 hsoc[3,3] = 1.0 hsoc[7,4] = sqrt6 hsoc[2,5] = sqrt6 hsoc[6,6] = 1.0 hsoc[9,6] = 2.0 hsoc[4,7] = sqrt6 hsoc[7,7] = -1.0 hsoc[8,8] = 2.0 hsoc[6,9] = 2.0 hsoc[9,9] = -2.0 return 0.5 * soc * hsoc elif case.strip() == 'f': hsoc = np.zeros((14, 14), dtype=np.complex128) hsoc[0,0 ] = -3.0 hsoc[3,0 ] = sqrt6 hsoc[1,1 ] = 3.0 hsoc[2,2 ] = -2.0 hsoc[5,2 ] = sqrt10 hsoc[0,3 ] = sqrt6 hsoc[3,3 ] = 2.0 hsoc[4,4 ] = -1.0 hsoc[7,4 ] = sqrt12 hsoc[2,5 ] = sqrt10 hsoc[5,5 ] = 1.0 hsoc[9,6 ] = sqrt12 hsoc[4,7 ] = sqrt12 hsoc[8,8 ] = 1.0 hsoc[11,8 ] = sqrt10 hsoc[6,9 ] = sqrt12 hsoc[9,9 ] = -1.0 hsoc[10,10] = 2.0 hsoc[13,10] = sqrt6 hsoc[8,11 ] = sqrt10 hsoc[11,11] = -2.0 hsoc[12,12] = 3.0 hsoc[10,13] = sqrt6 hsoc[13,13] = -3.0 return 0.5 * soc * hsoc else: print("don't support SOC for this case: ", case) return
d1c87105831952746e7b089480058b38c382bcd5
3,654,478
def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_frame_mappings >>> with custom_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError("Could not determine celestial frame corresponding to " "the specified WCS object")
74f798f0f19566acf9f2115edf47ee2cf262ca0b
3,654,479
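A hedged usage sketch for wcs_to_celestial_frame above; it assumes the snippet lives alongside astropy's frame-mapping machinery, i.e. WCS_FRAME_MAPPINGS is populated with the built-in mappings as in astropy.wcs.utils:

from astropy.wcs import WCS

# A simple celestial WCS with RA/Dec axes; with no explicit RADESYS this
# should resolve to the ICRS frame under astropy's default mappings.
w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
frame = wcs_to_celestial_frame(w)
print(frame)  # e.g. <ICRS Frame>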
def conv2d(x, f=64, k=3, d=1, act=None, pad='SAME', name='conv2d'):
    """
    :param x: input
    :param f: filters, default 64
    :param k: kernel size, default 3
    :param d: strides, default 1
    :param act: activation function, default None
    :param pad: padding (valid or same), default same
    :param name: scope name, default conv2d
    :return: conv2d net
    """
    return tf.layers.conv2d(x,
                            filters=f, kernel_size=k, strides=d,
                            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),
                            bias_initializer=tf.zeros_initializer(),
                            activation=act,
                            padding=pad,
                            name=name)
86e2b6b9ac21074da460ee2785ef6fca317e0417
3,654,480
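A short usage sketch for the conv2d wrapper above; it assumes TensorFlow 1.x, since tf.layers and tf.contrib were removed in TF 2:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = conv2d(x, f=32, k=3, d=1, act=tf.nn.relu, name='conv1')
net = conv2d(net, f=64, k=3, d=2, name='conv2_downsample')
print(net.get_shape().as_list())  # [None, 16, 16, 64]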
import numpy as np


def _is_uniform_distributed_cf(cf):
    """
    Check if the provided center frequencies are uniformly distributed.
    """
    # Uniform spacing means the second difference is zero everywhere.
    return not np.any(np.diff(np.diff(cf)) != 0)
c8cee1832ff4664839a0adc1263f3ece94673ad7
3,654,481
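A quick check of _is_uniform_distributed_cf above on linearly and logarithmically spaced center frequencies:

import numpy as np

print(_is_uniform_distributed_cf(np.array([100.0, 200.0, 300.0, 400.0])))  # True
print(_is_uniform_distributed_cf(np.logspace(2, 3, 4)))                    # False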
def build_person(first_name, last_name): """Return a dictionary of information about a person.""" person = {'first': first_name, 'last': last_name} return person
c8da8a5c4d4b7403804eff55e38106bb5921cf06
3,654,482
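A short usage example for build_person above:

musician = build_person('jimi', 'hendrix')
print(musician)  # {'first': 'jimi', 'last': 'hendrix'}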
import numpy as np


def radon(image, theta=None):
    """
    Calculates the radon transform of an image given specified
    projection angles.

    Parameters
    ----------
    image : array_like, dtype=float
        Input image.
    theta : array_like, dtype=float, optional (default np.arange(180))
        Projection angles (in degrees).

    Returns
    -------
    output : ndarray
        Radon transform (sinogram).

    """
    if image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        theta = np.arange(180)

    height, width = image.shape
    diagonal = np.sqrt(height**2 + width**2)
    heightpad = np.ceil(diagonal - height)
    widthpad = np.ceil(diagonal - width)
    padded_image = np.zeros((int(height + heightpad),
                             int(width + widthpad)))
    y0, y1 = int(np.ceil(heightpad / 2)), \
             int((np.ceil(heightpad / 2) + height))
    x0, x1 = int((np.ceil(widthpad / 2))), \
             int((np.ceil(widthpad / 2) + width))
    padded_image[y0:y1, x0:x1] = image
    out = np.zeros((max(padded_image.shape), len(theta)))

    h, w = padded_image.shape
    dh, dw = h // 2, w // 2
    shift0 = np.array([[1, 0, -dw],
                       [0, 1, -dh],
                       [0, 0, 1]])
    shift1 = np.array([[1, 0, dw],
                       [0, 1, dh],
                       [0, 0, 1]])

    def build_rotation(theta):
        T = -np.deg2rad(theta)
        R = np.array([[np.cos(T), -np.sin(T), 0],
                      [np.sin(T), np.cos(T), 0],
                      [0, 0, 1]])
        return shift1.dot(R).dot(shift0)

    # `homography` is assumed to be an image-warping helper (a projective
    # transform applied to the padded image) defined elsewhere in this module.
    for i in range(len(theta)):
        rotated = homography(padded_image, build_rotation(-theta[i]))
        out[:, i] = rotated.sum(0)[::-1]
    return out
9395e742353def0db9fa26e955d80c31a0c84d55
3,654,483
def build_idrac_table_schemas(metric_definitions: list):
    """build_idrac_table_schemas Build iDRAC Table Schemas

    Build table schemas based on the idrac telemetry metric definitions.
    The helpers utils.data_type_mapping and log are assumed to be imported
    elsewhere in this module.

    Args:
        metric_definitions (list): idrac telemetry metric definitions

    Returns:
        dict: iDRAC table schemas
    """
    table_schemas = {}

    try:
        for metric in metric_definitions:
            table_name = metric['Id']
            metric_type = metric['MetricDataType']
            metric_unit = metric.get('Units', None)

            # For network metrics, use BIGINT for storing the metric readings
            if metric_unit == 'By' or metric_unit == 'Pkt':
                value_type = 'BIGINT'
            else:
                value_type = utils.data_type_mapping.get(metric_type, 'TEXT')

            column_names = ['Timestamp', 'NodeID', 'Source', 'FQDD', 'Value']
            column_types = ['TIMESTAMPTZ NOT NULL', 'INT NOT NULL', 'TEXT', \
                            'TEXT', value_type]

            table_schemas.update({
                table_name: {
                    'column_names': column_names,
                    'column_types': column_types,
                }
            })
    except Exception as err:
        log.error(f"Cannot build idrac table schemas: {err}")

    return table_schemas
5f7b6b5807f009d56b1f2aabeb86d0ddfcbdf44f
3,654,484
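A hedged usage sketch for build_idrac_table_schemas above with a single made-up metric definition; using a 'By' (bytes) unit keeps the example on the BIGINT branch, so the external utils.data_type_mapping table is not exercised:

metric_definitions = [
    {'Id': 'RxBytes', 'MetricDataType': 'Integer', 'Units': 'By'},
]
schemas = build_idrac_table_schemas(metric_definitions)
print(schemas['RxBytes']['column_names'])      # ['Timestamp', 'NodeID', 'Source', 'FQDD', 'Value']
print(schemas['RxBytes']['column_types'][-1])  # BIGINT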
from typing import Tuple


def _increasing_randomly_negate_to_arg(
    level: int, params: Tuple[float, float]
) -> Tuple[float]:
    """
    Convert level to transform magnitude. This assumes transform magnitude
    increases linearly with level and is negated with 50% probability.

    Args:
        level (int): Level value.
        params (Tuple[float, float]): Params contains two values:
            1) Base transform magnitude when level is 0;
            2) Maximum increase in transform magnitude when level is at its
               maximum.
    """
    magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
    return (params[0] + _randomly_negate(magnitude),)
a1e9cc220753132cfeb1426967d2cd648bc78fa8
3,654,485
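An illustrative call to _increasing_randomly_negate_to_arg above; the params pair (0.0, 30.0) is made up (think rotation degrees), and _AUGMENTATION_MAX_LEVEL and _randomly_negate are module-level helpers assumed to exist (e.g. a max level of 10 and a 50% sign flip):

# With a max level of 10, level 5 maps to a magnitude of 15.0, returned as a
# one-element tuple whose sign may be flipped: (15.0,) or (-15.0,).
print(_increasing_randomly_negate_to_arg(5, (0.0, 30.0)))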
import json import hashlib def hashify(params, max_length=8): """ Create a short hashed string of the given parameters. :param params: A dictionary of key, value pairs for parameters. :param max_length: [optional] The maximum length of the hashed string. """ param_str = json.dumps(params, separators=(',', ':'), sort_keys=True) param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest() return param_hash[:max_length]
e4a97a28fc2d0564da3e6b22f32735b4a2534c3e
3,654,486
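A small usage example for hashify above; the parameter dict is arbitrary:

params = {'lr': 0.01, 'layers': 3}
print(hashify(params))                 # stable 8-character prefix of the md5 hex digest
print(hashify(params, max_length=12))  # same digest, longer prefix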
def unique_entries(results):
    """Prune non-unique search results."""
    seen = set()
    clean_results = []
    for i in results:
        if i['code'] not in seen:
            clean_results.append(i)
            seen.add(i['code'])
    return clean_results
c0c55ebd5aa76f3a7f44134a972019c3d26c1c48
3,654,488
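A quick example of unique_entries above de-duplicating results by their 'code' key:

results = [{'code': 'A1', 'name': 'first'},
           {'code': 'A1', 'name': 'duplicate'},
           {'code': 'B2', 'name': 'second'}]
print(unique_entries(results))
# [{'code': 'A1', 'name': 'first'}, {'code': 'B2', 'name': 'second'}]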
def get_q_confidence() -> int:
    """Get the user's confidence for the card."""
    response = input("How confident do you feel about being able to answer this question (from 1-10)? ")
    if response.isnumeric() and 0 < int(response) <= 10:
        return int(response)
    else:
        print("Incorrect score value, please enter a number from 1 to 10.")
        # we call the function again until it returns an appropriate value
        return get_q_confidence()
e61ceb5676703a795a24f99ee7849a362186ec84
3,654,489
def generate_offices_table(offices, by_office, by_polling_center, election_day, day_after_election_day): """ Pre-compute key data needed for generating election day office reports. """ offices_by_key = {str(office['code']): office for office in offices} rows = [] for key in sorted([key for key in by_office.keys()]): row = by_office[key] key = str(key) # copy name from the offices hash array row['english_name'] = offices_by_key[key]['english_name'] row['arabic_name'] = offices_by_key[key]['arabic_name'] on_election_day = row.get(election_day, {}) # get election day numbers row['opened'] = on_election_day.get('opened', 0) row['votes_reported_1'] = on_election_day.get('1', 0) row['votes_reported_2'] = on_election_day.get('2', 0) row['votes_reported_3'] = on_election_day.get('3', 0) # and aggregate counts row['reported_1'] = on_election_day.get('1_count', 0) row['reported_2'] = on_election_day.get('2_count', 0) row['reported_3'] = on_election_day.get('3_count', 0) # check for late results # We only want late reports for period 4. The JSON data has aggregate # numbers for office by day, but you can't tell which of those values are new reports on # EDAY+1 and which ones are replacements for values given on EDAY, so we have to iterate # through each center to get that info row['votes_reported_4'] = 0 reported_4 = 0 # Which polling centers are in this office? centers = {k: v for k, v in by_polling_center.items() if str(v['office_id']) == key} for center_id, center in centers.items(): if day_after_election_day in center and '4' in center[day_after_election_day]: # found a period 4 report on EDAY+1. Sum the votes and increment the report count row['votes_reported_4'] += center[day_after_election_day]['4'] reported_4 += 1 elif election_day in center and '4' in center[election_day]: # didn't find an EDAY+1 report, so use EDAY, if present row['votes_reported_4'] += center[election_day]['4'] reported_4 += 1 row['reported_4'] = reported_4 # save derived values row['not_opened'] = row['polling_center_count'] - row['opened'] row['not_reported_1'] = row['polling_center_count'] - row['reported_1'] row['not_reported_2'] = row['polling_center_count'] - row['reported_2'] row['not_reported_3'] = row['polling_center_count'] - row['reported_3'] row['not_reported_4'] = row['polling_center_count'] - reported_4 row['closed'] = reported_4 # reporting final tally means center closed rows.append(row) return rows
85111ed67e8f6b8dce71af2844ee865699f3fe01
3,654,490
import time import random import select def bang(nick, chan, message, db, conn, notice): """when there is a duck on the loose use this command to shoot it.""" global game_status, scripters if chan in opt_out: return network = conn.name score = "" out = "" miss = ["You just shot yourself in the foot, the duck laughed at you as it flew off.", "WHOOSH! You missed the duck completely!", "Your gun jammed!", "Better luck next time.", "Your barrel must be bent lol, maybe next time!", "Clearly you're using a BB gun, get a real gun and try again!", "Did you just throw a firecracker? Go buy a shotgun and come back!","Wow, Could you be a worse shot?" ] if not game_status[network][chan]['game_on']: return "There is no activehunt right now. Use @starthunt to start a game." elif game_status[network][chan]['duck_status'] != 1: if game_status[network][chan]['no_duck_kick'] == 1: out = "KICK {} {} The last duck was already nabbed, try again with the next duck.".format(chan, nick) conn.send(out) return return "The last duck was already nabbed, try again with the next duck." else: game_status[network][chan]['shoot_time'] = time() deploy = game_status[network][chan]['duck_time'] shoot = game_status[network][chan]['shoot_time'] if nick.lower() in scripters: if scripters[nick.lower()] > shoot: notice("You are in a cool down period, you can try again in {} seconds.".format(str(scripters[nick.lower()] - shoot))) return chance = hit_or_miss(deploy, shoot) if not random.random() <= chance and chance > .05: out = random.choice(miss) + " You can try again in 3 seconds." scripters[nick.lower()] = shoot + 3 return out if chance == .05: out += "You pulled the trigger in {} seconds, that's mighty fast. Are you running a script for this game? Take a 2 hour cool down.".format(str(shoot - deploy)) scripters[nick.lower()] = shoot + 7200 if not random.random() <= chance: return random.choice(miss) + " " + out else: message(out) game_status[network][chan]['duck_status'] = 2 score = db.execute(select([table.c.shot]) \ .where(table.c.network == conn.name) \ .where(table.c.chan == chan.lower()) \ .where(table.c.name == nick.lower())).fetchone() if score: score = score[0] score += 1 dbupdate(nick, chan, db, conn, score, 0) else: score = 1 dbadd_entry(nick, chan, db, conn, score, 0) timer = "{:.3f}".format(shoot - deploy) duck = "duck" if score == 1 else "ducks" message("{} Perfect aim, you shot the duck in {} seconds! You have killed {} {} in {}.".format(nick, timer, score, duck, chan)) set_ducktime(chan, conn)
78e537caa4c2579226bfbb870a1e37cacd58279e
3,654,491
def pfunc_role_coverage(args): """Another intermediate function for parallelization; as for pfunc_doctor_banding.""" rota = args[0] role = args[1] return rota.get_role_coverage(role)
043ce250b428d443de90c7aa5fa8e8dcc2869303
3,654,492
def parse(s: str) -> Tree: """ Parse PENMAN-notation string *s* into its tree structure. Args: s: a string containing a single PENMAN-serialized graph Returns: The tree structure described by *s*. Example: >>> import penman >>> penman.parse('(b / bark-01 :ARG0 (d / dog))') # noqa Tree(('b', [('/', 'bark-01'), (':ARG0', ('d', [('/', 'dog')]))])) """ tokens = lex(s, pattern=PENMAN_RE) return _parse(tokens)
2a309be1e2a4d8c63130120f9497464811cc6e91
3,654,493
from typing import List

# The Vector annotation used by this snippet is assumed to be a simple
# list-of-floats alias.
Vector = List[float]


def subtract(v: Vector, w: Vector) -> Vector:
    """Subtracts corresponding elements"""
    assert len(v) == len(w), "vectors must be the same length"
    return [v_i - w_i for v_i, w_i in zip(v, w)]
6e81286b28a178981d970630104ac23bfc606e67
3,654,494
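A one-line check of subtract above:

assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]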
def getWordScore(word, n): """ Returns the score for a word. Assumes the word is a valid word. The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, PLUS 50 points if all n letters are used on the first turn. Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES) word: string (lowercase letters) n: integer (HAND_SIZE; i.e., hand size required for additional points) returns: int >= 0 """ result = 0 bonus = 0 if len(word) == n: bonus = 50 for letter in word: result += SCRABBLE_LETTER_VALUES[letter] result *= len(word) result += bonus return result
610ed561edf246cef2bfd9f6cc5e38904bb939ec
3,654,496
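An illustrative call to getWordScore above; SCRABBLE_LETTER_VALUES is a module constant assumed to hold the standard Scrabble letter scores (a=1, w=4, s=1, ...):

# 'was' scores (4 + 1 + 1) * 3 = 18 with a hand size of 7, so no 50-point bonus.
print(getWordScore('was', 7))  # 18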
def get_commit(): """ Try to return the intended commit / release to deal with. Otherwise raise an acceptable error. 1) it was specified on the command line 2) use the current branch in the target repo """ commit = getattr(env, 'commit', None) or rev_parse('HEAD') if commit is None: raise RuntimeError( 'Unable to ascertain target commit from command line or git repo') return commit
90af53491335a7c616dc7a070394ec7408b7be52
3,654,497
import numpy as np


def deg2hms(x):
    """Transform degrees to *hours:minutes:seconds* strings.

    Parameters
    ----------
    x : float
        The degree value in [0, 360) to be written as a sexagesimal string.

    Returns
    -------
    out : str
        The input angle written as a sexagesimal string, in the form,
        hours:minutes:seconds.

    """
    if not 0.0 <= x < 360.0:
        raise ValueError("Bad RA value in degrees")
    _h = np.floor(x * 12.0 / 180.0)
    _m = np.floor((x * 12.0 / 180.0 - _h) * 60.0)
    _s = ((x * 12.0 / 180.0 - _h) * 60.0 - _m) * 60.0
    hms = f"{_h:02.0f}:{_m:02.0f}:{_s:07.4f}"
    return hms
6572020a71d3abaac42c8826c6248c648535c3a9
3,654,498
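A couple of example conversions with deg2hms above:

print(deg2hms(15.0))   # 01:00:00.0000
print(deg2hms(180.0))  # 12:00:00.0000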
def normalise_whitespace(row): """Return table row with normalised white space. This involves stripping leading and trailing whitespace, as well as consolidating white space to single spaces. """ pairs = ( (k, _normalise_cell(v)) for k, v in row.items()) return { k: v for k, v in pairs if not isinstance(v, str) or v}
10a580ef43c1cc47efc709fff05abd98bb332bcf
3,654,499
import struct def test_eap_proto_otp_errors(dev, apdev): """EAP-OTP local error cases""" def otp_handler2(ctx, req): logger.info("otp_handler2 - RX " + req.encode("hex")) if 'num' not in ctx: ctx['num'] = 0 ctx['num'] = ctx['num'] + 1 if 'id' not in ctx: ctx['id'] = 1 ctx['id'] = (ctx['id'] + 1) % 256 idx = 0 idx += 1 if ctx['num'] == idx: logger.info("Test: Challenge included") return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'], 4 + 1 + 1, EAP_TYPE_OTP, ord('A')) return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4) srv = start_radius_server(otp_handler2) try: hapd = start_ap(apdev[0]) with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_otp_process"): dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412", eap="OTP", identity="user", password="password", wait_connect=False) wait_fail_trigger(dev[0], "GET_ALLOC_FAIL") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() finally: stop_radius_server(srv)
c32797121b695ad30f3cb3013a79c0e309d88715
3,654,500
import pandas as pd


def pivot_proportions(df, groups, responses, weights=1):
    """
    Pivot data to show the breakdown of responses for each group.

    Parameters:
        df: a pandas DataFrame with data to be aggregated
        groups: the name of the column containing the groups to partition by
        responses: the name of the column that contains responses to aggregate
            into proportions
        weights: the statistical weighting associated with each response

    Returns:
        a pandas DataFrame containing the proportion of responses within each
        group
    """
    pivot_data = df[[groups, responses]].assign(weights=weights)
    pivoted_counts = pd.pivot_table(
        pivot_data, columns=groups, index=responses, aggfunc='sum'
    )
    pivoted_counts = pivoted_counts['weights'].sort_index(axis=1)
    return (pivoted_counts / pivoted_counts.sum()).fillna(0)
7bf8cdc199fe800cb1bb280ceb2ffdb489f0d342
3,654,501
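A small, self-contained example of pivot_proportions above on a toy survey table:

import pandas as pd

df = pd.DataFrame({'region': ['north', 'north', 'south', 'south'],
                   'answer': ['yes', 'no', 'yes', 'yes']})
print(pivot_proportions(df, 'region', 'answer'))
# Each region column sums to 1: north -> yes 0.5 / no 0.5, south -> yes 1.0 / no 0.0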
import numpy as np


def row_stack(a1, a2):
    """
    Stacks data from subsequent sweeps, while padding "empty" columns from
    subsequent sweeps.

    Inputs
    ------
    a1: np.array
        destination array
    a2: np.array
        array which is added onto the first array

    Returns
    -------
    out: np.array
        stacked destination and additional array, with uniform shape
    """
    [N1, M1] = a1.shape
    [N2, M2] = a2.shape

    # Pad the narrower array so both arrays have the same number of columns.
    if M1 > M2:
        a2 = np.pad(a2, ((0, 0), (0, M1 - M2)), mode='constant',
                    constant_values=-9999999)
    elif M2 > M1:
        a1 = np.pad(a1, ((0, 0), (0, M2 - M1)), mode='constant',
                    constant_values=-9999999)
    out = np.vstack((a1, a2))
    out[out == -9999999] = np.nan

    return out
4e8961351283a1702bc25349f2523c068cfb5424
3,654,502
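A quick demonstration of row_stack above with two float arrays of different widths (float input matters, because NaN cannot be written into an integer array):

import numpy as np

a1 = np.ones((2, 3))
a2 = np.zeros((2, 2))
out = row_stack(a1, a2)
print(out.shape)                   # (4, 3)
print(np.isnan(out[2:, 2]).all())  # True: the padded column becomes NaN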
def globalPrediction(vid, category_names, vid_probs, predicted_labels): """ Get a matrix of probabilities over the classes for the c3d features of a video. Generate the top 3 predictions from the prob matrix """ anno_list = [] # Idea 1 : To form the hist over the categories, each bin has sum of probs vprobs_sum = vid_probs.sum(axis=0) top_n = vprobs_sum.sort_values(ascending = False)[:3] #counter = collections.Counter(predicted_labels) #top_n = counter.most_common(3) # list of tuples #assert len(top_n)==3 labels = top_n.index.tolist() scores = top_n.values.tolist() for idx,score in enumerate(scores): anno_list.append({'score': score, 'label':labels[idx]}) #for (idx,score) in top_n: # anno_list.append({'score': score, 'label':category_names[idx]}) # Idea 2 : Detect temporal continuity of category predicted. Longer the better # Idea 3 : Count the number of highest votes for top category. (Worse than 1) # If equal votes for >1 category then use Idea 1 # finds the max val index among the columns for each row and the freq of the # occurrence of the column names (in decreasing order) # labels = vid_probs.idxmax(axis=1).value_counts()[:3].index.tolist() # scores = probs_sum[labels].tolist() # for idx,score in enumerate(scores): # anno_list.append({'score': score, 'label':labels[idx]}) return anno_list, vprobs_sum
51676499cbf719874c49b89557d960ed8a136243
3,654,503
def GetApexServerStatus(api_key): """ get the status of Apex Legends servers. :param api_key: The API key to use. Warning You must put either a clickable link to "https://apexlegendsstatus.com" OR have a message such as "Data from apexlegendsstatus.com" when displaying data coming from this API. Your key may be suspended otherwise. """ url = 'https://api.mozambiquehe.re/servers' try: res = get_request(url, {'Authorization': api_key}) response = res[0] if response.status_code == 200: r = response.json() res = ApexTrackerPy.Apexclass.A_Server_Data( row_json=r, elapsed_time=res[1], Origin_login_EU_West=r["Origin_login"]["EU-West"], Origin_login_EU_East=r["Origin_login"]["EU-East"], Origin_login_US_West=r["Origin_login"]["US-West"], Origin_login_US_East=r["Origin_login"]["US-East"], Origin_login_US_Central=r["Origin_login"]["US-Central"], Origin_login_Asia=r["Origin_login"]["Asia"], Origin_login_SouthAmerica=r["Origin_login"]["SouthAmerica"], EA_novafusion_EU_West=r["EA_novafusion"]["EU-West"], EA_novafusion_EU_East=r["EA_novafusion"]["EU-East"], EA_novafusion_US_West=r["EA_novafusion"]["US-West"], EA_novafusion_US_East=r["EA_novafusion"]["US-East"], EA_novafusion_US_Central=r["EA_novafusion"]["US-Central"], EA_novafusion_Asia=r["EA_novafusion"]["Asia"], EA_novafusion_SouthAmerica=r["EA_novafusion"]["SouthAmerica"], EA_accounts_EU_West=r["EA_accounts"]["EU-West"], EA_accounts_EU_East=r["EA_accounts"]["EU-East"], EA_accounts_US_West=r["EA_accounts"]["US-West"], EA_accounts_US_East=r["EA_accounts"]["US-East"], EA_accounts_US_Central=r["EA_accounts"]["US-Central"], EA_accounts_Asia=r["EA_accounts"]["Asia"], EA_accounts_SouthAmerica=r["EA_accounts"]["SouthAmerica"], ApexOauth_Crossplay_EU_West=r["ApexOauth_Crossplay"]["EU-West"], ApexOauth_Crossplay_EU_East=r["ApexOauth_Crossplay"]["EU-East"], ApexOauth_Crossplay_US_West=r["ApexOauth_Crossplay"]["US-West"], ApexOauth_Crossplay_US_East=r["ApexOauth_Crossplay"]["US-East"], ApexOauth_Crossplay_US_Central=r["ApexOauth_Crossplay"]["US-Central"], ApexOauth_Crossplay_Asia=r["ApexOauth_Crossplay"]["Asia"], ApexOauth_Crossplay_SouthAmerica=r["ApexOauth_Crossplay"]["SouthAmerica"], CSServer_Playstation_Network=r["otherPlatforms"]["Playstation-Network"], CSServer_Xbox_Live=r["otherPlatforms"]["Xbox-Live"], ) return res else: raise Exception('HttpError!:The API returned status code '+str(response.status_code)) except Exception as e: raise Exception('HttpError!:An error has occurred during the API call.\n'+str(e))
362ca4e68ffbf395f56ccb6aad65cc9d13ab4545
3,654,504
def construct_mdx(cube_name, rows, columns, contexts=None, suppress=None):
    """ Construct an MDX Query from the given selections.

    :param cube_name: Name of the Cube
    :param rows: Dictionary of Dimension Names and Selections
    :param columns: Dictionary of Dimension Names and Selections
        (Dimension-MDX, List of Elementnames, Subset, or None)
    :param contexts: Dictionary of Dimension Names and Selections
    :param suppress: "Both", "Rows", "Columns" or None
    :return: Generated MDX Query
    """
    # MDX Skeleton
    mdx_template = 'SELECT {}{} ON ROWS, {}{} ON COLUMNS FROM [{}] {}'

    # Suppression
    mdx_rows_suppress = 'NON EMPTY ' if (suppress in ['Rows', 'Both'] and rows) else ''
    mdx_columns_suppress = 'NON EMPTY ' if (suppress in ['Columns', 'Both'] and columns) else ''

    # Rows and Columns
    mdx_rows = construct_mdx_axis(rows)
    mdx_columns = construct_mdx_axis(columns)

    # Context filter (where statement)
    mdx_where = ''
    if contexts:
        mdx_where_parts = ['[{}].[{}]'.format(dim, elem) for dim, elem in contexts.items()]
        mdx_where += "WHERE (" + ','.join(mdx_where_parts) + ")"

    # Return Full MDX
    return mdx_template.format(mdx_rows_suppress, mdx_rows, mdx_columns_suppress, mdx_columns, cube_name, mdx_where)
117d554b71fcb5c065664e51a9064b2edb504ed6
3,654,505
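An illustrative call to construct_mdx above; construct_mdx_axis is a companion helper assumed to turn each dimension/selection pair into an axis expression, so the exact output depends on it and this only sketches the calling convention:

mdx = construct_mdx(
    cube_name='Sales',
    rows={'Product': ['Bikes', 'Helmets']},
    columns={'Period': ['2023-Q1', '2023-Q2']},
    contexts={'Version': 'Actual'},
    suppress='Rows',
)
print(mdx)
# e.g. SELECT NON EMPTY {...} ON ROWS, {...} ON COLUMNS FROM [Sales] WHERE ([Version].[Actual])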
def mock_train_model(spark_context, testserver): """Pre-condition: worker.update_one is assumed to be working.""" inq = Queue() outq = Queue() job = get_job() job['urls'] = [testserver.url] db = get_fake_mongo_client().ophicleide db.models.insert_one(job) inq.put(job) update_model(spark_context, inq, outq, db, 'http://testurl') return db, job['_id']
eb862f8f600a6aa64cb65685f122dd577a6e51df
3,654,506
def calc_number_of_children(*args): """ calc_number_of_children(loc, tif, dont_deref_ptr=False) -> int Calculate max number of lines of a formatted c data, when expanded ( 'PTV_EXPAND' ). @param loc: location of the data ( ALOC_STATIC or ALOC_CUSTOM ) (C++: const argloc_t &) @param tif: type info (C++: const tinfo_t &) @param dont_deref_ptr: consider 'ea' as the ptr value (C++: bool) """ return _ida_typeinf.calc_number_of_children(*args)
cfc7427ec5ff4d0fc78d87d315460c62d130cd3d
3,654,507