content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, values 0 to 3.66M
import numpy as np


def get_mfp(g, gv):
    """Calculate mean free path from inverse lifetime and group velocity."""
    g = np.where(g > 0, g, -1)
    gv_norm = np.sqrt((gv**2).sum(axis=2))
    mean_freepath = np.where(g > 0, gv_norm / (2 * 2 * np.pi * g), 0)
    return mean_freepath
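A short usage sketch for the row above (not part of the original dataset content); the array shapes are an assumption about typical phonon data, with g indexed as (q-points, bands) and the group velocities carrying an extra Cartesian axis.

import numpy as np
g_example = np.array([[0.1, 0.0, 0.2], [0.3, 0.1, 0.0]])   # hypothetical inverse lifetimes
gv_example = np.ones((2, 3, 3))                             # hypothetical group velocities
print(get_mfp(g_example, gv_example))                       # zeros where g <= 0, finite values elsewhere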
bcef3e92de1b81a8688b3a732dd7af0dd9ce6b8c
3,654,400
from glob import glob
import os
import pandas as pd


def collate_participant_tables(subject_ids, base_dir):
    """
    Generate a pandas dataframe across all subjects

    Parameters
    ----------
    subject_ids : list
        a list of subject identifiers
    base_dir : str
        path to a mindboggle output base directory (mindboggled)

    Returns
    -------
    collated_table : pandas DataFrame
        rows of subject_ids, and columns of shape measures

    Examples
    --------
    >>> from mindboggle.mio.tables import collate_participant_tables
    >>> subject_ids = ['arno', 'arno'] # normally two different subjects
    >>> base_dir = os.environ['MINDBOGGLE_DATA'] # doctest: +SKIP
    >>> dft = collate_participant_tables(subject_ids, base_dir) # doctest: +SKIP
    >>> dft['lcsfs-sylvian fissure-area'] # doctest: +SKIP
    arno    4.641015
    arno    4.641015
    Name: lcsfs-sylvian fissure-area, dtype: float64
    """
    out = None
    for id in subject_ids:
        fl = glob(os.path.join(base_dir, id, 'tables', '*.csv')) + \
             glob(os.path.join(base_dir, id, 'tables', '*', '*.csv'))
        # skip vertices outputs
        dft = pd.concat([fname2df(val) for val in sorted(fl)
                         if 'vertices' not in val], axis=1)
        dft.index = [id]
        out = dft if out is None else pd.concat((out, dft), axis=0)
    return out
eb367b9d790c4d7a5583b65fc12b60c7d7cc6198
3,654,401
import os

import pyhocon.converter


def convert_to_format(file: str, output: str, output_format: str):
    """
    Converts a HOCON file to another format

    Parameters
    ----------
    file : str
        hocon file to convert
    output : str
        output file to produce
    output_format : str
        format of the output file

    Returns
    -------
    str
        the output file
    """
    (pyhocon
     .converter
     .HOCONConverter
     .convert_from_file(file, output_file=output, output_format=output_format))
    os.remove(file)
    return output
e7ec08d9167f30717e7f11cf5e234837b6c2da66
3,654,402
import gmpy2


def find_prime_root(l, blum=True, n=1):
    """Find smallest prime of bit length l satisfying given constraints.

    Default is to return Blum primes (primes p with p % 4 == 3).
    Also, a primitive root w is returned of prime order at least n.
    """
    if l == 1:
        assert not blum
        assert n == 1
        p = 2
        w = 1
    elif n <= 2:
        n = 2
        w = -1
        p = gmpy2.next_prime(2**(l - 1))
        if blum:
            while p % 4 != 3:
                p = gmpy2.next_prime(p)
        p = int(p)
    else:
        assert blum
        if not gmpy2.is_prime(n):
            n = int(gmpy2.next_prime(n))
        p = 1 + n * (1 + (n**2) % 4 + 4 * ((2**(l - 2)) // n))
        while not gmpy2.is_prime(p):
            p += 4 * n
        a = 1
        w = 1
        while w == 1:
            a += 1
            w = gmpy2.powmod(a, (p - 1) // n, p)
        p, w = int(p), int(w)
    return p, n, w
be2d465fdb8de45dc2574788c12b8f78f4601508
3,654,403
import json


def set_parameters(_configs, new=False):
    """
    Sets configuration parameters

    Parameters
    ----------
    _configs :
        Dictionary containing configuration options from the config file (config.json)
    new : bool
        Do you want to start from a new file?

    Returns
    -------
    _configs :
        Updated dictionary containing configuration options from the config file (config.json)
    """
    if new:
        _configs = {x: "NA" for x in _configs}

    print('*Do not include single or double quotes*\n')

    if _configs['eye_mask_path'] == 'NA':
        _eye_mask_path = input('Add the full eye mask filepath: ')
        _configs['eye_mask_path'] = _eye_mask_path

    if _configs['train_file'] == 'NA':
        _train_file = input('Add the name of the file used for training [peer1.nii.gz]: ')
        if not _train_file:
            _configs['train_file'] = 'peer1.nii.gz'
        else:
            _configs['train_file'] = _train_file

    if _configs['test_file'] == 'NA':
        _test_file = input('Which file would you like to predict eye movements from? [movie.nii.gz]: ')
        if not _test_file:
            _configs['test_file'] = 'movie.nii.gz'
        else:
            _configs['test_file'] = _test_file

    if _configs['use_gsr'] == 'NA':
        _use_gsr = input('Use global signal regression? (y/n) [n]: ')
        if (not _use_gsr) or (_use_gsr == 'n'):
            _configs['use_gsr'] = "0"
        else:
            _configs['use_gsr'] = "1"

    if _configs['motion_scrub'] == 'NA':
        _use_ms = input('Use motion scrubbing? (y/n) [n]: ')
        if (not _use_ms) or (_use_ms == 'n'):
            _configs['use_ms'] = "0"
            _configs['motion_threshold'] = "0"
            _configs['motion_scrub'] = "Not implemented"
        elif _use_ms == 'y':
            _configs['use_ms'] = "1"
            _motion_scrub_filename = input('Add the filename of the CSV that contains the framewise displacement \
time series [motion_ts.csv]: ')
            if not _motion_scrub_filename:
                _configs['motion_scrub'] = 'motion_ts.csv'
            else:
                _configs['motion_scrub'] = _motion_scrub_filename
            _motion_threshold = input('Add a motion threshold for motion scrubbing [.2]: ')
            if not _motion_threshold:
                _configs['motion_threshold'] = ".2"
            else:
                _configs['motion_threshold'] = _motion_threshold

    with open('peer/config.json', 'w') as f:
        json.dump(_configs, f)

    return _configs
7c0d52f5a2ee5df9b54278162570606d684a6a64
3,654,404
import traceback
import sys


def create_all_files(sizes):
    """Create all files.

    Parameters
    ----------
    sizes : a list of lists of the form [(filesize, [block_size_1, block_size_2, ...])]

    Returns
    -------
    List of file names, a dictionary of measurements
    """
    Stats = []
    files = []
    try:
        for file_size, block_sizes in sizes:
            for block_size in block_sizes:
                n = block_size
                m = int(file_size / block_size)
                assert n * m == file_size, 'file_size=%d is not a multiple of block_size=%d' % (file_size, n)
                filename = 'BlockData' + str(file_size)
                (t_mem, t_disk) = create_file(n, m, filename=filename)
                Stats.append({'n': n, 'm': m, 't_mem': t_mem, 't_disk': t_disk})
                files.append(filename)
    except:
        traceback.print_exc(file=sys.stdout)
    return files, Stats
af3d0c0876b61019acf0e9e61aae266794c66682
3,654,405
import os


def read_requirements(filename='requirements.txt'):
    """Reads the list of requirements from given file.

    :param filename: Filename to read the requirements from.
                     Uses ``'requirements.txt'`` by default.

    :return: Requirements as list of strings
    """
    # allow for some leeway with the argument
    if not filename.startswith('requirements'):
        filename = 'requirements-' + filename
    if not os.path.splitext(filename)[1]:
        filename += '.txt'  # no extension, add default

    def valid_line(line):
        line = line.strip()
        return line and not any(line.startswith(p) for p in ('#', '-'))

    def extract_requirement(line):
        egg_eq = '#egg='
        if egg_eq in line:
            _, requirement = line.split(egg_eq, 1)
            return requirement
        return line

    with open(filename) as f:
        lines = f.readlines()
    return list(map(extract_requirement, filter(valid_line, lines)))
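A hedged usage sketch (the file name and its contents are hypothetical): it writes a small requirements file and shows which lines survive the filtering and the #egg= extraction above.

with open('requirements-dev.txt', 'w') as f:
    f.write('# a comment\n-r base.txt\nrequests>=2.0\ngit+https://example.com/repo.git#egg=mypkg\n')
print(read_requirements('dev'))
# ['requests>=2.0\n', 'mypkg\n']  -- comment and '-' lines dropped; trailing newlines are kept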
fcade21b1aaff9320b32f5572fc28f8a6d31b5ab
3,654,406
def vgg16(mask_init='1s', mask_scale=1e-2, threshold_fn='binarizer', **kwargs):
    """VGG 16-layer model (configuration "D")."""
    model = VGG(make_layers(cfg['D'], mask_init, mask_scale, threshold_fn),
                mask_init, mask_scale, threshold_fn, **kwargs)
    return model
fa3a17460988a2c87ca63b287674b9836c7f69ac
3,654,407
def sort(X):
    """
    Return sorted elements of :param:`X` and array of corresponding sorted indices.

    :param X: Target vector.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
             or :class:`numpy.matrix`
    """
    assert 1 in X.shape, "X should be vector."
    X = X.flatten().tolist()[0]
    return sorted(X), sorted(list(range(len(X))), key=X.__getitem__)
a176e2538fd1c0042eefc6962d1b354b7b4ca736
3,654,408
def get_query(sf, query_text, verbose=True):
    """
    Returns a list of lists based on a SOQL query with the fields
    as the header column in the first list/row
    """
    # execute query for up to 2,000 records
    gc = sf.query(query_text)
    records = gc['records']
    if verbose:
        print('Reading from %s object' % records[0]['attributes']['type'],
              flush=True)

    headers = list(records[0].keys())[1:]  # get the headers
    return_table = [[record[heading] for heading in headers]
                    for record in records]
    return_table.insert(0, headers)

    # the above is complete unless there are >2,000 records
    total_read_so_far = len(records)
    while not gc['done']:
        if verbose:
            print('Progress: {} records out of {}'.format(
                total_read_so_far, gc['totalSize']), flush=True)
        gc = sf.query_more(gc['nextRecordsUrl'], True)
        records = gc['records']
        total_read_so_far += len(records)
        next_table = [[record[heading] for heading in headers]
                      for record in records]
        return_table.extend(next_table)

    return return_table
ea93b6652a2d455b368a831d8c6d6b4554023313
3,654,409
import io
import csv


def strip_blank(contents):
    """
    strip the redundant blank in file contents.
    """
    with io.StringIO(contents) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
        rows = []
        for row in csvreader:
            rows.append(",".join(['"{}"'.format(x.strip()) for x in row]))
        return "\n".join(rows)
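A quick, assumed usage example for strip_blank showing how surrounding blanks are removed and every field re-quoted.

print(strip_blank('a , b ,c\n 1, 2 ,3'))
# "a","b","c"
# "1","2","3"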
d446f2123aa3cfe3b1966151f323fa1c4e41cb08
3,654,410
from uuid import uuid4


def generate_id() -> str:
    """Generates an uuid v4.

    :return: Hexadecimal string representation of the uuid.
    """
    return uuid4().hex
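Illustrative check (an assumption about how the helper is used): uuid4().hex is always a 32-character lowercase hexadecimal string.

new_id = generate_id()
assert len(new_id) == 32 and all(c in '0123456789abcdef' for c in new_id)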
674d0bea01f9109e02af787435d7cee5c37f0a5a
3,654,411
def perms_of_length(n, length):
    """Return all permutations in :math:`S_n` of the given length (i.e., with
    the specified number of inversion).

    This uses the algorithm in
    `<http://webhome.cs.uvic.ca/~ruskey/Publications/Inversion/InversionCAT.pdf>`_.

    :param n: specifies the permutation group :math:`S_n`.
    :param length: number of inversions.
    :rtype: list of :class:`sage.Permutation`
    """
    result = []

    def gen(S, l, suffix=[]):
        if l == 0:
            result.append(Permutation(S + suffix))
            return
        n = len(S)
        bin = (n - 1) * (n - 2) / 2
        for i in range(n):
            if n - (i + 1) <= l <= bin + n - (i + 1):
                x = S[i]
                gen(S[0:i] + S[i + 1:], l - n + (i + 1), [x] + suffix)

    gen(S=list(range(1, n + 1)), l=length)
    return result
da18a1a8b2dad5a0084f3d557a2cc1018798d33e
3,654,412
import pandas as pd
from scipy import stats


def rank_by_entropy(pq, kl=True):
    """
    evaluate kl divergence, wasserstein distance
    wasserstein: http://pythonhosted.org/pyriemann/_modules/pyriemann/utils/distance.html
    """
    # to avoid Inf cases
    pq = pq + 0.0000001
    pq = pq/pq.sum(axis=0)
    if kl:
        # entropy actually can calculate KL divergence
        final = pq.iloc[:, :-1].apply(
            lambda x: stats.entropy(x, pq.iloc[:, -1], base=2), axis=0)
        label = 'KL'
    else:
        # JS divergence
        final = pq.iloc[:, :-1].apply(
            lambda x: JSD(x, pq.iloc[:, -1]), axis=0)
        label = 'JSD'

    final.sort_values(ascending=False, inplace=True)
    rank = final.rank(ascending=False)
    final = pd.concat([final, rank], axis=1)
    final.columns = [label, 'rank']
    return final
0b47e2ba8de66148a50dbb1b4637897ac7bdee4b
3,654,413
import networkx as nx
import numpy as np


def generate_graph_properties(networks):
    """
    This function constructs lists with centrality rankings of nodes in multiple networks.
    Instead of using the absolute degree or betweenness centrality, this takes metric bias into account.
    If the graph is not connected, the values are calculated for the largest connected component.

    :param networks: List of input networks
    :return: Pandas dataframe with rankings
    """
    properties = dict()
    property_names = ['Assortativity', 'Connectivity', 'Diameter',
                      'Radius', 'Average shortest path length']
    for property in property_names:
        properties[property] = list()
    for network in networks:
        if len(network[1].nodes) > 0:
            properties['Assortativity'].append(
                (network[0], nx.degree_pearson_correlation_coefficient(network[1])))
            properties['Connectivity'].append(
                (network[0], nx.average_node_connectivity(network[1])))
            if nx.is_connected(network[1]):
                properties['Diameter'].append((network[0], nx.diameter(network[1])))
                properties['Radius'].append((network[0], nx.radius(network[1])))
                properties['Average shortest path length'].append(
                    (network[0], nx.average_shortest_path_length(network[1])))
            else:
                components = list(nx.connected_components(network[1]))
                sizes = []
                for component in components:
                    sizes.append(len(component))
                subnetwork = nx.subgraph(network[1],
                                         components[np.where(np.max(sizes) == sizes)[0][0]])
                properties['Diameter'].append((network[0], nx.diameter(subnetwork)))
                properties['Radius'].append((network[0], nx.radius(subnetwork)))
                properties['Average shortest path length'].append(
                    (network[0], nx.average_shortest_path_length(subnetwork)))
        else:
            properties['Assortativity'].append(None)
            properties['Connectivity'].append(None)
            properties['Diameter'].append(None)
            properties['Radius'].append(None)
            properties['Average shortest path length'].append(None)
    return properties
e135c4211d924ab9f1af6baec06b8b313a96b11f
3,654,414
def anova_old(
        expression, gene_id, photoperiod_set, strain_set, time_point_set,
        num_replicates):
    """One-way analysis of variance (ANOVA) using F-test."""
    num_groups = len(photoperiod_set) * len(strain_set) * len(time_point_set)
    group_size = num_replicates
    total_expression = 0

    # First scan: calculate overall average.
    for pp in photoperiod_set:
        for ss in strain_set:
            for tt in time_point_set:
                total_expression += sum(expression[(gene_id, pp, ss, tt)])
    overall_avg = total_expression / num_groups / group_size

    # Second scan: calculate variances.
    in_group_var = 0
    bt_group_var = 0
    for pp in photoperiod_set:
        for ss in strain_set:
            for tt in time_point_set:
                group = expression[(gene_id, pp, ss, tt)]
                group_avg = sum(group) / group_size
                in_group_var += group_size * (group_avg - overall_avg) ** 2
                for element in group:
                    bt_group_var += (element - group_avg) ** 2

    dof = (num_groups - 1, group_size * num_groups - num_groups)
    f_stat = bt_group_var / dof[0] / in_group_var * dof[1]
    return f_stat, dof
f809e0e2be877e1a0f21ca1e05a7079db80254a1
3,654,415
import struct


def _make_ext_reader(ext_bits, ext_mask):
    """Helper for Stroke and ControlPoint parsing.

    Returns:
    - function reader(file) -> list<extension values>
    - function writer(file, values)
    - dict mapping extension_name -> extension_index
    """
    # Make struct packing strings from the extension details
    infos = []
    while ext_mask:
        bit = ext_mask & ~(ext_mask - 1)
        ext_mask = ext_mask ^ bit
        try:
            info = ext_bits[bit]
        except KeyError:
            info = ext_bits['unknown'](bit)
        infos.append(info)
    print(infos)

    if len(infos) == 0:
        print("[_make_ext_reader lambda]")
        return (lambda f: [], lambda f, vs: None, {})

    fmt = '<' + ''.join(info[1] for info in infos)
    names = [info[0] for info in infos]
    if '@' in fmt:
        # struct.unpack isn't general enough to do the job
        fmts = ['<' + info[1] for info in infos]

        def reader(f, fmts=fmts):
            print("[_make_ext_reader reader 1] f:", f, "fmts:", fmts)
            values = [None] * len(fmts)
            for i, fmt in enumerate(fmts):
                if fmt == '<@':
                    nbytes, = struct.unpack('<I', f.read(4))
                    values[i] = f.read(nbytes)
                else:
                    values[i], = struct.unpack(fmt, f.read(4))
            return values
    else:
        def reader(f, fmt=fmt, nbytes=len(infos) * 4):
            print("[_make_ext_reader reader 2] f:", f, "fmt:", fmt, "nbytes:", nbytes)
            values = list(struct.unpack(fmt, f.read(nbytes)))
            print("values", values)
            return values

    def writer(f, values, fmt=fmt):
        print("[_make_ext_reader writer] f:", f, "values:", values, "fmt:", fmt)
        return f.write(struct.pack(fmt, *values))

    lookup = dict((name, i) for (i, name) in enumerate(names))
    return reader, writer, lookup
2f85ab0f09d5a4cbd2aad7a9819440b610bcf20c
3,654,416
import numpy as np


def resolve_covariant(n_total, covariant=None):
    """Resolves a covariant in the following cases:
        - If a covariant is not provided a diagonal matrix of 1s is generated, and symmetry
          is checked via a comparison with the dataset's transpose
        - If a covariant is provided, the symmetry is checked

    args:
        n_total {int} -- total number of informative features
        covariant {[type]} -- [description] (default: {None})

    returns:
        covariant {np_array}
    """
    if covariant is None:
        print("No covariant provided, generating one.")
        covariant = np.diag(np.ones(n_total))

    # test for symmetry on covariance matrix by comparing the matrix to its transpose
    try:
        assert np.all(covariant == covariant.T)
    except AssertionError:
        print("Assertion error - please check covariance matrix is symmetric.")

    return covariant
cd32136786d36e88204574a739006239312bb99e
3,654,417
from typing import Optional
from typing import Union


def create_generic_constant(
    type_spec: Optional[computation_types.Type],
    scalar_value: Union[int, float]) -> building_blocks.ComputationBuildingBlock:
  """Creates constant for a combination of federated, tuple and tensor types.

  Args:
    type_spec: A `computation_types.Type` containing only federated, tuple or
      tensor types, or `None` to use to construct a generic constant.
    scalar_value: The scalar value we wish this constant to have.

  Returns:
    Instance of `building_blocks.ComputationBuildingBlock` representing
    `scalar_value` packed into `type_spec`.

  Raises:
    TypeError: If types don't match their specification in the args section.
      Notice validation of consistency of `type_spec` with `scalar_value` is
      not the responsibility of this function.
  """
  if type_spec is None:
    return create_tensorflow_constant(type_spec, scalar_value)
  py_typecheck.check_type(type_spec, computation_types.Type)
  inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
  if (not inferred_scalar_value_type.is_tensor() or
      inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError(
        'Must pass a scalar value to `create_generic_constant`; encountered a '
        'value {}'.format(scalar_value))
  if not type_analysis.contains_only(
      type_spec,
      lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
    raise TypeError
  if type_analysis.contains_only(type_spec,
                                 lambda t: t.is_struct() or t.is_tensor()):
    return create_tensorflow_constant(type_spec, scalar_value)
  elif type_spec.is_federated():
    unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value)
    if type_spec.placement == placements.CLIENTS:
      placement_federated_type = computation_types.FederatedType(
          type_spec.member, type_spec.placement, all_equal=True)
      placement_fn_type = computation_types.FunctionType(
          type_spec.member, placement_federated_type)
      placement_function = building_blocks.Intrinsic(
          intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
    elif type_spec.placement == placements.SERVER:
      placement_federated_type = computation_types.FederatedType(
          type_spec.member, type_spec.placement, all_equal=True)
      placement_fn_type = computation_types.FunctionType(
          type_spec.member, placement_federated_type)
      placement_function = building_blocks.Intrinsic(
          intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
    return building_blocks.Call(placement_function, unplaced_zero)
  elif type_spec.is_struct():
    elements = []
    for k in range(len(type_spec)):
      elements.append(create_generic_constant(type_spec[k], scalar_value))
    names = [name for name, _ in structure.iter_elements(type_spec)]
    packed_elements = building_blocks.Struct(elements)
    named_tuple = create_named_tuple(packed_elements, names,
                                     type_spec.python_container)
    return named_tuple
  else:
    raise ValueError(
        'The type_spec {} has slipped through all our '
        'generic constant cases, and failed to raise.'.format(type_spec))
e440ef6470eacd66fc51210f288c3bf3c14486c6
3,654,418
def all_same(lst: list) -> bool:
    """test if all list entries are the same"""
    return lst[1:] == lst[:-1]
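A few illustrative checks of the slicing trick used above (empty lists compare equal, so the empty case is vacuously true).

assert all_same([7, 7, 7])
assert all_same([])            # vacuously true
assert not all_same([1, 2, 1])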
4ef42fc65d64bc76ab1f56d6e03def4cb61cf6f0
3,654,419
def binary_find(N, x, array):
    """
    Binary search
    :param N: size of the array
    :param x: value
    :param array: array
    :return: position where it is found. -1 if it is not found
    """
    lower = 0
    upper = N
    while (lower + 1) < upper:
        mid = int((lower + upper) / 2)
        if x < array[mid]:
            upper = mid
        else:
            lower = mid
    if array[lower] <= x:
        return lower
    return -1
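Assumed usage of binary_find on a sorted list; N is the list length and -1 signals a value below every element.

data = [1, 3, 5, 7, 9]
print(binary_find(len(data), 7, data))   # 3, the index of 7
print(binary_find(len(data), 0, data))   # -1, smaller than every element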
ed6e7cc15de238381dbf65eb6c981676fd0525f5
3,654,420
def _add_data_entity(app_context, entity_type, data):
    """Insert new entity into a given namespace."""
    old_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        new_object = entity_type()
        new_object.data = data
        new_object.put()
        return new_object
    finally:
        namespace_manager.set_namespace(old_namespace)
864e12973ad7cfd4c89fbefb211b8b940913590f
3,654,421
def scalarmat(*q):
    """multiplies every object in q with each object in q.
    Should return a unity matrix for an orthonormal system"""
    ret = []
    for a in q:
        toa = []
        for b in q:
            toa.append(a*b)
        ret.append(toa)
    return ret
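A small illustrative call with plain numbers (an assumption; the function works for any objects supporting *), showing the pairwise-product matrix it builds.

print(scalarmat(1, 2, 3))
# [[1, 2, 3], [2, 4, 6], [3, 6, 9]]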
a61c813b548f1934e16517efc4d203c6390097fe
3,654,422
import time


def frames_per_second():
    """Return the estimated frames per second

    Returns the current estimate for frames-per-second (FPS).
    FPS is estimated by measuring the amount of time that has elapsed since
    this function was previously called. The FPS estimate is low-pass filtered
    to reduce noise.

    This function is intended to be called one time for every iteration of
    the program's main loop.

    Returns
    -------
    fps : float
        Estimated frames-per-second. This value is low-pass filtered
        to reduce noise.
    """
    global _time_prev, _fps
    time_now = time.time() * 1000.0
    dt = time_now - _time_prev
    _time_prev = time_now
    if dt == 0.0:
        return _fps.value
    return _fps.update(1000.0 / dt)
0ac78e052d1e3f4d09a332bd71df041f14a46111
3,654,423
def modularity(partition, graph, weight='weight'):
    """Compute the modularity of a partition of a graph

    Parameters
    ----------
    partition : dict
        the partition of the nodes, i.e a dictionary where keys are their nodes
        and values the communities
    graph : networkx.Graph
        the networkx graph which is decomposed
    weight : str, optional
        the key in graph to use as weight. Default to 'weight'

    Returns
    -------
    modularity : float
        The modularity

    Raises
    ------
    KeyError
        If the partition is not a partition of all graph nodes
    ValueError
        If the graph has no link
    TypeError
        If graph is not a networkx.Graph

    References
    ----------
    .. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community
       structure in networks. Physical Review E 69, 26113(2004).

    Examples
    --------
    >>> G=nx.erdos_renyi_graph(100, 0.01)
    >>> part = best_partition(G)
    >>> modularity(part, G)
    """
    if type(graph) != nx.Graph:
        raise TypeError("Bad graph type, use only non directed graph")

    inc = dict([])
    deg = dict([])
    links = graph.size(weight=weight)
    if links == 0:
        raise ValueError("A graph without link has an undefined modularity")

    for node in graph:
        com = partition[node]
        deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)
        for neighbor, datas in graph[node].items():
            edge_weight = datas.get(weight, 1)
            if partition[neighbor] == com:
                if neighbor == node:
                    inc[com] = inc.get(com, 0.) + float(edge_weight)
                else:
                    inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.

    res = 0.
    for com in set(partition.values()):
        res += inc.get(com, 0.) - \
               ((deg.get(com, 0.) ** 2) / (4. * links))
    return (1.0 / links) * res
371c3f5e362114896bf0559efe452d79af6e79f8
3,654,424
import numpy as np


def config_lst_bin_files(data_files, dlst=None, atol=1e-10, lst_start=0.0,
                         fixed_lst_start=False, verbose=True, ntimes_per_file=60):
    """
    Configure lst grid, starting LST and output files given input data files and LSTbin params.

    Parameters
    ----------
    data_files : type=list of lists: nested set of lists, with each nested list containing
                 paths to miriad files from a particular night. These files should be sorted
                 by ascending Julian Date. Frequency axis of each file must be identical.
    dlst : type=float, LST bin width. If None, will get this from the first file in data_files.
    lst_start : type=float, starting LST for binner as it sweeps from lst_start to lst_start + 2pi.
    fixed_lst_start : type=bool, if True, LST grid starts at lst_start, regardless of LST of
                      first data record. Otherwise, LST grid starts at LST of first data record.
    ntimes_per_file : type=int, number of LST bins in a single output file

    Returns (lst_grid, dlst, file_lsts, start_lst)
    -------
    lst_grid : float ndarray holding LST bin centers
    dlst : float, LST bin width of output lst_grid
    file_lsts : list, contains the lst grid of each output file
    start_lst : float, starting lst for LST binner
    """
    # get dlst from first data file if None
    if dlst is None:
        start, stop, int_time = utils.get_miriad_times(data_files[0][0])
        dlst = int_time

    # get start and stop times for each list of files in data_files.
    # add_int_buffer adds an integration to the end time of df[:-1] files,
    # and the %(2pi) ensures everything is within a 2pi LST grid.
    data_times = []
    for df in data_files:
        data_times.append(np.array(utils.get_miriad_times(df, add_int_buffer=True))[:2, :].T % (2 * np.pi))

    # unwrap data_times less than lst_start, get starting and ending lst
    start_lst = 100
    end_lst = -1
    for dt in data_times:
        # unwrap starts below lst_start
        dt[:, 0][dt[:, 0] < lst_start - atol] += 2 * np.pi

        # unwrap ends below starts
        dt[:, 1][dt[:, 1] < dt[:, 0] - atol] += 2 * np.pi

        # get start and end lst
        start_lst = np.min(np.append(start_lst, dt[:, 0]))
        end_lst = np.max(np.append(end_lst, dt.ravel()))

    # ensure start_lst isn't beyond 2pi
    if start_lst >= (2 * np.pi):
        start_lst -= 2 * np.pi
        end_lst -= 2 * np.pi
        for dt in data_times:
            dt -= 2 * np.pi

    # create lst_grid
    if fixed_lst_start:
        start_lst = lst_start
    lst_grid = make_lst_grid(dlst, lst_start=start_lst, verbose=verbose)
    dlst = np.median(np.diff(lst_grid))

    # get starting and stopping lst_grid indices
    start_diff = lst_grid - start_lst
    start_diff[start_diff < -dlst / 2 - atol] = 100
    start_index = np.argmin(start_diff)
    end_diff = lst_grid - end_lst
    end_diff[end_diff > dlst / 2 + atol] = -100
    end_index = np.argmax(end_diff)

    # get number of output files
    nfiles = int(np.ceil(float(end_index - start_index) / ntimes_per_file))

    # get output file lsts
    file_lsts = [lst_grid[start_index:end_index][ntimes_per_file * i:ntimes_per_file * (i + 1)]
                 for i in range(nfiles)]

    return data_times, lst_grid, dlst, file_lsts, start_lst
b91cd59bf8d9693bb255c10ef9fb5ce3ef219a41
3,654,425
import numpy as np


def get_str_arr_info(val):
    """Find type of string in array val, and also the min and max length.
    Return None if val does not contain strings."""
    fval = np.array(val).flatten()
    num_el = len(fval)
    max_length = 0
    total_length = 0
    for sval in fval:
        len_sval = len(sval)
        if len_sval > max_length:
            max_length = len_sval
        total_length += len_sval
    return (num_el, max_length, total_length)
283233c780379ca637f621510fa09c359ff53784
3,654,426
from math import sqrt

import torch


def generate_priors(image_size=300, layer_sizes=None, pool_ratios=None, min_sizes=None,
                    max_sizes=None, aspect_ratios=None):
    # TODO update feature maps, min_sizes, max_sizes for inputs size 5xx
    """
    This method generate prior boxes for SSD Model. In total, there will be 8732 prior boxes

    :param image_size: input image size for SSD Model
    :param layer_sizes: Layer sizes for each feature map
    :param pool_ratios: pooling ratio for each feature map. layer_size*pool_ratio = image_size
    :param min_sizes: minimum prior box size
    :param max_sizes: maximum prior box size
    :param aspect_ratios: ratio for prior box height and width
    :return: tensor of prior boxes
    """
    if aspect_ratios is None:
        aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    if min_sizes is None:
        min_sizes = [30, 60, 111, 162, 213, 264]
    if max_sizes is None:
        max_sizes = [60, 111, 162, 213, 264, 315]
    if pool_ratios is None:
        pool_ratios = [8, 16, 32, 64, 100, 300]
    if layer_sizes is None:
        layer_sizes = [38, 19, 10, 5, 3, 1]

    boxes = []
    for layer_size_idx, layer_size in enumerate(layer_sizes):
        min_size = min_sizes[layer_size_idx]
        max_size = max_sizes[layer_size_idx]
        pool_ratio = pool_ratios[layer_size_idx]
        for layer_height in range(layer_size):
            for layer_width in range(layer_size):
                layer_image_size = image_size / pool_ratio
                center_dim_x = (layer_width + 0.5) / layer_image_size
                center_dim_y = (layer_height + 0.5) / layer_image_size

                layer_min_size = min_size / image_size
                boxes += [center_dim_x, center_dim_y, layer_min_size, layer_min_size]

                diagonal = sqrt(layer_min_size * (max_size/image_size))
                boxes += [center_dim_x, center_dim_y, diagonal, diagonal]

                for ar in aspect_ratios[layer_size_idx]:
                    boxes += [center_dim_x, center_dim_y,
                              layer_min_size * sqrt(ar), layer_min_size / sqrt(ar)]
                    boxes += [center_dim_x, center_dim_y,
                              layer_min_size / sqrt(ar), layer_min_size * sqrt(ar)]

    output = torch.Tensor(boxes).view(-1, 4).clamp_(min=0, max=1)
    output.clamp_(max=1, min=0)
    return output
208fa7402f6260d21cb23893cd178fac09ba5739
3,654,427
import os


def listFiles(dir):
    """
    Walks the path and subdirectories to return a list of files.

    Parameters
    ----------
    dir : str
        the top directory to search
        subdirectories are also searched

    Returns
    -------
    listname: list
        a list of files in dir and subdirectories

    Notes
    -----
    This can be replaced by functions in `os.path`; as of 3.4, pathlib is probably better.
    It is not clear that this function is used anywhere in ChiantiPy
    """
    alist = os.walk(dir)
    listname = []
    for (dirpath, dirnames, filenames) in alist:
        if len(dirnames) == 0:
            for f in filenames:
                file = os.path.join(dirpath, f)
                if os.path.isfile(file):
                    listname.append(file)
        else:
            for f in filenames:
                file = os.path.join(dirpath, f)
                if os.path.isfile(file):
                    listname.append(file)
    return listname
e2cc32ffd29971fc11df0378f02f801932234569
3,654,428
from typing import Callable
from typing import Any


def wrap(module: nn.Module,
         cls: Callable = FullyShardedDataParallel,
         activation_checkpoint: bool = False,
         **wrap_overrides: Any) -> nn.Module:
    """
    Annotate that a module should be wrapped. Annotated modules will only be
    wrapped if inside of an :func:`enable_wrap` context manager. An important
    use case is annotating large layers that should be sharded (in-place) during
    initialization, to avoid running out of system memory.

    Usage::

        with enable_wrap(**params):
            # Wraps layer in FSDP by default if within context
            self.l1 = wrap(torch.nn.Linear(5, 5))

    Args:
        module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
        cls (Callable): class wrapper to wrap the model with if in context
            (default: :class:`FullyShardedDataParallel`)
        activation_checkpoint (bool): use activation checkpointing wrapper
            (default: False)
        **wrap_overrides: configuration overrides that will take priority over
            the values provided by the :func:`enable_wrap` context
    """
    if ConfigAutoWrap.in_autowrap_context:
        wrap_overrides = {**ConfigAutoWrap.kwargs, **wrap_overrides}
        if activation_checkpoint:
            module = checkpoint_wrapper(module)
        return cls(module, **wrap_overrides)
    return module
cdf313b9100ee2a2f3a9d3ed47fafa76dea16b74
3,654,429
def _is_multiple_state(state_size):
    """Check whether the state_size contains multiple states."""
    return (hasattr(state_size, '__len__') and
            not isinstance(state_size, tensor_shape.TensorShape))
f034b2a4656edf72be515d99093efc3b03591af0
3,654,430
def deque_to_yaml(representer, node):
    """Convert collections.deque to YAML"""
    return representer.represent_sequence("!collections.deque",
                                          (list(node), node.maxlen))
5ff503b4f21af58cf96d26171e078ddd5d754141
3,654,431
import urllib2

from bs4 import BeautifulSoup


def parse_webpage(url, page_no):
    """
    Parses the given webpage using 'BeautifulSoup' and returns
    html content of that webpage.
    """
    page = urllib2.urlopen(url + page_no)
    parsed_page = BeautifulSoup(page, 'html.parser')
    return parsed_page
774046c85cc38f3575cabc473c93b92b6dbc3d25
3,654,432
import random
from string import digits


def randomDigits(length=8):
    """
    Generate a random string of digits.

    randomDigits() ==> 73048139
    """
    return ''.join([random.choice(digits) for _ in range(length)])
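Illustrative check (assumed usage): the helper returns a string of the requested length containing only digits.

token = randomDigits(6)
assert len(token) == 6 and token.isdigit()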
cb4200ea4d6850888461880bc3d9cc0ea6804993
3,654,433
from imcsdk.mometa.adaptor.AdaptorGenProfile import AdaptorGenProfile
import time


def adaptor_set_all(handle, adaptors=None, server_id=1, **kwargs):
    """
    Example:
        adaptor_set_all(handle,
                        adaptors=[
                            {id: 1,
                             lldp: "enabled",
                             fip_mode: "enabled",
                             port_channel_enable: "enabled",
                             vntag_mode: "enabled",
                             admin_action: None}
                        ])
    """
    api = 'adaptor_set_all'
    api_error_msg = VicConst.ADAPTOR_ERROR_MSG

    if not adaptors:
        log.debug("No adapters present for configuration")
        return

    # fetch adaptors from end point, adaptor_ep_dict is dict {id, adaptor_mo}
    adaptor_ep_dict = _prepare_ep_adaptor_dict(handle, api_error_msg)

    # validate input and checks if adaptor exists at end point
    for adaptor in adaptors:
        id = adaptor.get('id', None)
        if id is None:
            raise ImcOperationError(
                api_error_msg, 'Provide adapter slot to configure')
        if id not in adaptor_ep_dict:
            raise ImcOperationError(
                api_error_msg,
                "Adaptor %s is not present at end point." % id)

    # configure adapter
    mos = []
    restart_server = None
    adaptor_list = []
    # adaptors are the configured adaptors in intersight AdaptorConfiguration Policy
    for adaptor in adaptors:
        id = adaptor['id']
        lldp = adaptor.pop('lldp', None)
        fip_mode = adaptor.pop('fip_mode', None)
        port_channel_enable = adaptor.pop('port_channel_enable', None)
        log.debug("Adapter Config Policy - configured Values")
        log.debug("Port Channel: %s, LLDP Mode: %s, Fip Mode: %s",
                  port_channel_enable, lldp, fip_mode)
        # vntag_mode = adaptor.pop('vntag_mode', None)
        # admin_state = adaptor.pop('admin_state', None)
        mo = adaptor_ep_dict[id]
        adaptor_properties = adaptor_properties_get(handle, id, server_id=1)
        adaptor_list.append(adaptor_properties)

        # port_channel_capable returns None for < Gen4 adapters and False for Gen4+
        # unsupported portchannel adapters. Hence a check has to be done for both None
        # and False for backward compatibility in deploying Adapter Configuration Policy.
        if adaptor_properties.port_channel_capable == None or \
                adaptor_properties.port_channel_capable == "False":
            log.debug("Port Channel is not supported for the adapter at slot: %s",
                      adaptor_properties.pci_slot)
            port_channel_enable = None

        if adaptor_properties.port_channel_capable == "True" and \
                port_channel_enable == "disabled":
            log.debug("Port Channel is disabled by user for adapter at slot %s. "
                      "Server restart initiated", adaptor_properties.pci_slot)
            restart_server = True
            mo.admin_state = AdaptorUnitConsts.ADMIN_STATE_ADAPTOR_RESET_DEFAULT

        if port_channel_enable == "disabled":
            AdaptorGenProfile(parent_mo_or_dn=mo,
                              lldp=lldp,
                              fip_mode=fip_mode,
                              port_channel_enable="disabled",
                              vntag_mode="disabled")
        else:
            # port_channel_enable value is set to enabled by default.
            # Hence, its not required to send the default value.
            AdaptorGenProfile(parent_mo_or_dn=mo,
                              lldp=lldp,
                              fip_mode=fip_mode,
                              vntag_mode="disabled")
        mos.append(mo)

    response = handle.set_mos(mos)
    ret = []
    if response:
        ret.append(_process_response(response, api, api_error_msg))

    ext_ethif_adaptor_mos = []
    for adaptor in adaptors:
        id = adaptor['id']
        ext_ethifs = adaptor.pop('ext_ethifs', None)
        if ext_ethifs:
            mo = adaptor_ep_dict[id]
            ext_ethif_mos = _ext_ethif_set_all(handle, ext_ethifs, mo)
            ext_ethif_adaptor_mos.extend(ext_ethif_mos)

    if len(ext_ethif_adaptor_mos) > 0:
        response = handle.set_mos(ext_ethif_adaptor_mos)
        if response:
            error_msg = VicConst.DCE_IF_ERROR_MSG
            ret.append(_process_response(response, api, error_msg))

    results = {}
    results["changed"] = True
    results["msg"] = ""
    results["msg_params"] = ret

    # Power Cycle Host for the changes to take effect.
    if restart_server:
        log.debug("Restarting server...")
        server_power_cycle(handle, timeout=180)
        _wait_for_power_state(handle, state="on", timeout=60, interval=5, server_id=1)
        log.debug("Server restarted successfully. Adaptor initialisation check in progress.")
        for adaptor in adaptor_list:
            adaptor_initialization_in_progress = True
            wait_count = 0
            while adaptor_initialization_in_progress and wait_count < 5:
                try:
                    adaptor = _get_mo(handle, dn=adaptor.dn)
                    adaptor_initialization_in_progress = False
                    log.debug("Adaptor at slot %s initialisation complete.",
                              adaptor.pci_slot)
                except ImcOperationError:
                    log.debug("Adaptor at slot %s initialisation in progress. Sleep for 5s.",
                              adaptor.pci_slot)
                    wait_count += 1
                    time.sleep(5)
            if adaptor_initialization_in_progress:
                log.debug("Adaptor initialisation failure for adaptor at slot %s",
                          adaptor.pci_slot)
                raise ImcOperationError(
                    api_error_msg,
                    "Adaptor %s is not initialised at end point." % adaptor.pci_slot)
        log.debug("Sleeping for 1 minute")
        time.sleep(60)

    log.debug("Returning results")
    return results
cc36c2e2104f74ed4a1e2239f979d89d42691cf9
3,654,434
from typing import Callable


def some_func(string: str, function: Callable) -> bool:
    """Check if some elements in a string match the function (functional).

    Args:
        string: <str> string to verify.
        function: <callable> function to call.

    Returns:
        True if some, but not all, elements in the sequence are True.

    Examples:
        >>> assert some_func('abcdefg&%$', str.isalpha)
        >>> assert not some_func('&%$=', str.isalpha)
    """
    return any(map(function, string)) and not all(map(function, string))
e67af6613975a6757905087397ff8b68e83ddbf6
3,654,435
def UseExceptions(*args):
    """UseExceptions()"""
    return _ogr.UseExceptions(*args)
71a8e36c0554796298a5e8c9a3e88bf423acef5b
3,654,436
def get_mlm_logits(input_tensor, albert_config, mlm_positions, output_weights):
  """From run_pretraining.py."""
  input_tensor = gather_indexes(input_tensor, mlm_positions)
  with tf.variable_scope("cls/predictions"):
    # We apply one more non-linear transformation before the output layer.
    # This matrix is not used after pre-training.
    with tf.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=albert_config.embedding_size,
          activation=modeling.get_activation(albert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)

    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    output_bias = tf.get_variable(
        "output_bias",
        shape=[albert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
  return logits
36a2f10fe33aea371fcbf23ac856bf910998e1c9
3,654,437
import numpy as np
from scipy.stats import gamma


def spm_hrf(TR, t1=6, t2=16, d1=1, d2=1, ratio=6, onset=0, kernel=32):
    """Python implementation of spm_hrf.m from the SPM software.

    Parameters
    ----------
    TR : float
        Repetition time at which to generate the HRF (in seconds).
    t1 : float (default=6)
        Delay of response relative to onset (in seconds).
    t2 : float (default=16)
        Delay of undershoot relative to onset (in seconds).
    d1 : float (default=1)
        Dispersion of response.
    d2 : float (default=1)
        Dispersion of undershoot.
    ratio : float (default=6)
        Ratio of response to undershoot.
    onset : float (default=0)
        Onset of hemodynamic response (in seconds).
    kernel : float (default=32)
        Length of kernel (in seconds).

    Returns
    -------
    hrf : array
        Hemodynamic response function

    References
    ----------
    [1] Adapted from the Poldrack lab fMRI tools.
        https://github.com/poldracklab/poldracklab-base/blob/master/fmri/spm_hrf.py
    """
    ## Define metadata.
    fMRI_T = 16.0
    TR = float(TR)

    ## Define times.
    dt = TR / fMRI_T
    u = np.arange(kernel / dt + 1) - onset / dt

    ## Generate (super-sampled) HRF.
    hrf = gamma(t1 / d1, scale=1.0 / (dt / d1)).pdf(u) - gamma(t2 / d2, scale=1.0 / (dt / d2)).pdf(u) / ratio

    ## Downsample.
    good_pts = np.array(range(int(kernel / TR))) * fMRI_T
    hrf = hrf[good_pts.astype(int)]

    ## Normalize and return.
    hrf = hrf / np.sum(hrf)
    return hrf
be07acb0980000a59f4df39f0ab7147dbb5d258e
3,654,438
import numpy as np


def prob_active_neuron(activity_matrix):
    """Get expected co-occurrence under independence assumption.

    Parameters
    ----------
    activity_matrix : np.array
        num_neurons by num_bins, boolean (1 or 0)

    Returns
    -------
    prob_active : np.array
        Fraction of bins each cell participates in individually
    """
    prob_active = np.mean(activity_matrix, axis=1)
    return prob_active
fd5eb513598d840602117adb0223c75b71660f8a
3,654,439
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
    """Equivalent of PIL Translate in X dimension."""
    image = translate(wrap(image), [-pixels, 0])
    return unwrap(image, replace)
53ea2bf905487a310d6271b37adef0523bcdf4de
3,654,440
def reduce_time_space_seasonal_regional(mv, season=seasonsyr, region=None, vid=None,
                                        exclude_axes=[]):
    """Reduces the variable mv in all time and space dimensions.  Any other dimensions
    will remain.  The averages will be restricted to the specified season and region.
    The season should be a cdutil.times.Seasons object.
    The region may be a string defining a region in defines.py, or it may be a list of
    four numbers as in defines.py.  That is, it would take the form
    [latmin,latmax,lonmin,lonmax].
    """
    #if len( set(['time','lat','lon','lev']) & set([ax.id for ax in allAxes(mv)]) )==0:
    if len([ax for ax in allAxes(mv) if ax.isTime() or ax.isLatitude() or
            ax.isLongitude() or ax.isLevel()]) == 0:
        return mv  # nothing to reduce

    if vid is None:
        vid = 'reduced_' + mv.id
    mvreg = select_region(mv, region)
    axes = allAxes(mvreg)
    #axis_names = [ a.id for a in axes if a.id=='lat' or a.id=='lon' or a.id=='lev']
    axis_names = [a.id for a in axes if a.isLatitude() or a.isLongitude() or a.isLevel()
                  and a.id not in exclude_axes]
    axes_string = '(' + ')('.join(axis_names) + ')'

    if len(axes_string) > 2:
        for axis in axes:
            if axis.getBounds() is None and not (axis.isTime() and hasattr(axis, 'climatology')):
                axis._bounds_ = axis.genGenericBounds()
        mvsav = cdutil.averager(mvreg, axis=axes_string)

    mvtsav = calculate_seasonal_climatology(mvsav, season)

    mvtsav.id = vid
    #mvtsav = delete_singleton_axis(mvtsav, vid='time')
    #mvtsav = delete_singleton_axis(mvtsav, vid='lev')
    #mvtsav = delete_singleton_axis(mvtsav, vid='lat')
    #mvtsav = delete_singleton_axis(mvtsav, vid='lon')
    return mvtsav
ec2005564ccaca881e2737cb8f51f05ba091e64d
3,654,441
import argparse


def get_args():
    """
    Function to retrieve and parse the command line arguments,
    then to return these arguments as an ArgumentParser object.

    Parameters:
        None.
    Returns:
        parser.parse_args(): inputed or default argument objects.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='flowers/test/100/image_07896.jpg',
                        help="path to folder of images")
    parser.add_argument('--checkpoint', type=str, default='save_directory/checkpoint.pth',
                        help='file to load the checkpoint')
    parser.add_argument('--category_names', type=str, default='cat_to_name.json',
                        help='file to map the real names')
    parser.add_argument('--top_k', type=int, default=3,
                        help='top classes predicted to return')
    parser.add_argument('--gpu', action='store_true',
                        help='hyperparameters for GPU')
    return parser.parse_args()
6ff328f56f0a12a736d41130dd34b49848ba7dad
3,654,442
import os
import tempfile


def apply_modifications(model, custom_objects=None):
    """Applies modifications to the model layers to create a new Graph. For example, simply changing
    `model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated
    with modified inbound and outbound tensors because of change in layer building function.

    Args:
        model: The `keras.models.Model` instance.

    Returns:
        The modified model with changes applied. Does not mutate the original `model`.
    """
    # The strategy is to save the modified model and load it back. This is done because setting the activation
    # in a Keras layer doesnt actually change the graph. We have to iterate the entire graph and change the
    # layer inbound and outbound nodes with modified tensors. This is doubly complicated in Keras 2.x since
    # multiple inbound and outbound nodes are allowed with the Graph API.
    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        model.save(model_path)
        return load_model(model_path, custom_objects=custom_objects)
    finally:
        os.remove(model_path)
16f230511fe689c724b40e08d1f8d2fb52abc71d
3,654,443
import math


def fuel_requirement(mass: int) -> int:
    """Fuel is mass divided by three, rounded down, minus 2"""
    return math.floor(mass / 3) - 2
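Two illustrative values for the formula above: floor(12 / 3) - 2 = 2 and floor(1969 / 3) - 2 = 654.

assert fuel_requirement(12) == 2
assert fuel_requirement(1969) == 654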
5899d9260fe7e353c3a1d882f624257d5009248d
3,654,444
import pandas as pd


def data_head(fname):
    """
    Get the column-names of the csv

    Parameters
    ----------
    fname: str
        Filename of the csv-data

    Returns
    -------
    str-list: header-names of the csv-data
    """
    return pd.read_csv(fname, encoding='ISO-8859-1').columns
2b10f0465b30371560a5bc009a2d3a945a80f493
3,654,445
def format(serverDict, sortKeyword='id'):
    """
    Returns an array of nicely formatted servers, sorted by whatever the user prefers,
    or id by default.
    """
    sortDict = {'id': lambda server: int(server.name[4:-3]),
                'uptime': lambda server: server.uptime}
    sortFunction = sortDict[sortKeyword]

    class Server:
        def __init__(self, serverName, dataSet):
            self.name = str(serverName)
            self.loadAvgs = dataSet[serverName]['load_avgs']
            self.users = dataSet[serverName]['users']
            self.uptime = dataSet[serverName]['uptime']

        def __str__(self):
            return str(self.name[:-3]) + " (" + str(self.loadAvgs[1] * 100) + "% mean CPU load, " \
                + str(len(self.users)) + " users online, up for " + cleanTime(self.uptime) + ")"

    serverList = []
    for server in serverDict:
        serverList.append(Server(server, serverDict))

    # Now, sort the list based on the sorting function
    serverList.sort(key=sortFunction)
    return serverList
67058d6c0dd6c64a2540be371fa7ba24d081d273
3,654,446
def moray_script():
    """
    Generate the js module that exposes JavaScript functions.

    Returns:
        The js module that exposes JavaScript functions.
    """
    return bottle.static_file('moray.js', root=_root_static_module)
35eebb14902513a2a0e12bf8ce866a8c6d00e193
3,654,447
import pandas as pd


def load_compdat(wells, buffer, meta, **kwargs):
    """Load COMPDAT table."""
    _ = kwargs
    dates = meta['DATES']
    columns = ['DATE', 'WELL', 'I', 'J', 'K1', 'K2', 'MODE', 'Sat',
               'CF', 'DIAM', 'KH', 'SKIN', 'ND', 'DIR', 'Ro']
    df = pd.DataFrame(columns=columns)
    for line in buffer:
        if '/' not in line:
            break
        line = line.split('/')[0].strip()
        if not line:
            break
        vals = line.split()
        full = [None] * len(columns)
        full[0] = dates[-1] if not dates.empty else pd.to_datetime('')
        shift = 1
        for i, v in enumerate(vals):
            if '*' in v:
                shift += int(v.strip('*')) - 1
            else:
                full[i+shift] = v
        df = df.append(dict(zip(columns, full)), ignore_index=True)
    df[['WELL', 'MODE', 'DIR']] = df[['WELL', 'MODE', 'DIR']].applymap(
        lambda x: x.strip('\'\"') if x is not None else x)
    df[['I', 'J', 'K1', 'K2']] = df[['I', 'J', 'K1', 'K2']].astype(int)
    df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']] = df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']].astype(float)
    for k, v in DEFAULTS.items():
        if k in df:
            df[k] = df[k].fillna(v)
    if not df.empty:
        welldata = {k: {'COMPDAT': v.reset_index(drop=True)}
                    for k, v in df.groupby('WELL')}
        wells.update(welldata, mode='a', ignore_index=True)
    return wells
fb28b82ba6ad36c3aea45e31c684c9302cdf511c
3,654,448
from typing import Optional


def scale_random(a: float, b: float, loc: Optional[float] = None,
                 scale: Optional[float] = None) -> float:
    """Returns a value from a standard normal truncated to [a, b] with mean loc
    and standard deviation scale."""
    return _default.scale_random(a, b, loc=loc, scale=scale)
3c336cd3c345f0366bd721ff2a3a426853804721
3,654,449
def created_link(dotfile: ResolvedDotfile) -> str:
    """An output line for a newly-created link."""
    return (
        co.BOLD
        + co.BRGREEN
        + OK
        + " "
        + ln(dotfile.installed.disp, dotfile.link_dest)
        + co.RESET
    )
9195db9c3ea8f7aa6281017ef62967ef5b07f4f3
3,654,450
def instruction2_task(scr):
    """ Description of task 1 """
    scr.draw_text(text="Great Work!! " +
                  "\n\nNow comes your TASK 3: **Consider an image**." +
                  "\n\nIf you press the spacebar now, an image will " +
                  "appear at the bottom of the screen. You can use the information from the" +
                  " image to make any modifications to the translation of the sentence." +
                  "\n\n***However in certain cases, the image is not related to the sentence " +
                  "or not present at all.***" +
                  "\n\nAfter looking at the image, say loudly if you'd like to modify your translation" +
                  " by saying " +
                  "\"I'd like to modify my translation.\" or \"I'd keep the same translation\"" +
                  "\nif you would like to stick with your translation." +
                  "\n\nThe final TASK 4 is to **Say the translation again (modified or not)**." +
                  "\nPlease press the spacebar to indicate the start of your new translation.\nYou can stop your" +
                  " recording by pressing the spacebar and moving to the next sentence.",
                  fontsize=25)
    return scr
554191b520e1229ffc076bbed1c57f265e0c0964
3,654,451
import os


def tail(f, lines=10, _buffer=4098):
    """Tail a file and get X lines from the end"""
    # place holder for the lines found
    lines_found = []

    # block counter will be multiplied by buffer
    # to get the block size from the end
    block_counter = -1

    # loop until we find X lines
    while len(lines_found) < lines:
        try:
            f.seek(block_counter * _buffer, os.SEEK_END)
        except IOError:  # either file is too small, or too many lines requested
            f.seek(0)
            lines_found = f.readlines()
            break

        lines_found = f.readlines()

        # we found enough lines, get out
        # Removed this line because it was redundant the while will catch
        # it, I left it for history
        # if len(lines_found) > lines:
        #    break

        # decrement the block counter to get the
        # next X bytes
        block_counter -= 1

    return lines_found[-lines:]
20ccac940eff04a6ec57d98d32330ebfbb97037d
3,654,452
def loggedin_and_owner_required(func):
    """
    Decorator that applies to functions expecting the "owner" name as a second
    argument.

    It will check that the visitor is also considered as the owner of the
    resource it is accessing.

    Note: automatically calls login_required and check_and_set_owner decorators.
    """
    # TODO when not logged in send a 401 authentication requested and
    # implement corresponding template (at least send a 401 for non-GET
    # requests !)
    @login_required(login_url=settings.LOGIN_URL)
    @check_and_set_owner
    def _loggedin_and_owner_required(request, owner_name, *args, **kwargs):
        if request.user != request.owner_user:
            return HttpResponseForbidden()
        else:
            return func(request, owner_name, *args, **kwargs)
    return _loggedin_and_owner_required
171695cc6b6dad2240fbe63ba8ab3193255fee7f
3,654,453
import sympy as sp


def recursive_subs(e: sp.Basic, replacements: list[tuple[sp.Symbol, sp.Basic]]) -> sp.Basic:
    """
    Substitute the expressions in ``replacements`` recursively.

    This might not be necessary in all cases, Sympy's builtin ``subs()`` method
    should also do this recursively.

    .. note::

        The order of the tuples in ``replacements`` might matter, make sure to
        order these sensibly in case the expression contains a lot of nested
        substitutions.

    Parameters
    ----------
    e : sp.Basic
        Input expression
    replacements : list[tuple[sp.Symbol, sp.Basic]]
        List of replacements: ``symbol, replace``

    Returns
    -------
    sp.Basic
        Substituted expression
    """
    for _ in range(0, len(replacements) + 1):
        new_e = e.subs(replacements)
        if new_e == e:
            return new_e
        else:
            e = new_e
    return new_e
013a203d214eb7c683efdefc2bc0b60781260576
3,654,454
def create_lag_i(df, time_col, colnames, lag):
    """
    the table should be indexed by i, year
    """
    # prepare names
    if lag > 0:
        s = "_l" + str(lag)
    else:
        s = "_f" + str(-lag)
    values = [n + s for n in colnames]
    rename = dict(zip(colnames, values))

    # create lags
    dlag = df.reset_index() \
             .assign(t=lambda d: d[time_col] + lag) \
             .rename(columns=rename)[['i', time_col] + values] \
             .set_index(['i', time_col])

    # join and return
    return(df.join(dlag))
be6d4b390ae66cd83320b2c341ba3c76cfad2bdb
3,654,455
def crop_image(image_array, point, size):
    """
    Cropping the image into the assigned size

    image_array: numpy array of image
    size: desirable cropped size

    return -> cropped image array
    """
    img_height, img_width = point  # assigned location in crop

    # for color image
    if len(image_array.shape) == 3:
        image_array = image_array[:, img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    # for gray image
    elif len(image_array.shape) == 2:
        image_array = image_array[img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    return image_array
8ee684719e3e4fea755466e810c645c1ccf7d7f5
3,654,456
from math import pi


def deg_to_rad(deg):
    """Convert degrees to radians."""
    return deg * pi / 180.0
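Quick numerical check (relies on the pi import above); comparisons are approximate to avoid exact floating-point equality.

assert abs(deg_to_rad(180) - pi) < 1e-12
assert abs(deg_to_rad(90) - pi / 2) < 1e-12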
e07bfcb4a541bddedeb8e9a03d6033b48d65c856
3,654,457
import numpy as np


def find_plane_normal(points):
    """
    d - number of dimensions
    n - number of points

    :param points: `d x n` array of points
    :return: normal vector of the best-fit plane through the points
    """
    mean = np.mean(points, axis=1)
    zero_centre = (points.T - mean.T).T
    U, s, VT = np.linalg.svd(zero_centre)
    normal = U[:, -1]
    return normal
3edd4a848b50cffe9a78c6f75999c79934fd5003
3,654,458
def binary_search(data, target, low, high):
    """Return True if target is found in indicated portion of a Python list.

    The search only considers the portion from data[low] to data[high] inclusive.
    """
    if low > high:
        return False                           # interval is empty; no match
    else:
        mid = (low + high) // 2
        if target == data[mid]:                # found a match
            return True
        elif target < data[mid]:
            # recur on the portion left of the middle
            return binary_search(data, target, low, mid - 1)
        else:
            # recur on the portion right of the middle
            return binary_search(data, target, mid + 1, high)
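Assumed usage on a sorted list, searching the inclusive index range [0, len(data) - 1].

data = [2, 4, 6, 8, 10]
assert binary_search(data, 8, 0, len(data) - 1)
assert not binary_search(data, 5, 0, len(data) - 1)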
4395434aea4862e7fc0cab83867f32955b8fb2a2
3,654,459
import time

import h5py
import numpy as np


def ReadUnifiedTreeandHaloCatalog(fname, desiredfields=[], icombinedfile=1, iverbose=1):
    """
    Read Unified Tree and halo catalog from HDF file with base filename fname.

    Parameters
    ----------

    Returns
    -------
    """
    if (icombinedfile):
        hdffile = h5py.File(fname, 'r')

        # load data sets containing number of snaps
        headergrpname = "Header/"
        numsnaps = hdffile[headergrpname].attrs["NSnaps"]

        # allocate memory
        halodata = [dict() for i in range(numsnaps)]
        numhalos = [0 for i in range(numsnaps)]
        atime = [0 for i in range(numsnaps)]
        tree = [[] for i in range(numsnaps)]
        cosmodata = dict()
        unitdata = dict()

        # load cosmology data
        cosmogrpname = "Cosmology/"
        fieldnames = [str(n) for n in hdffile[headergrpname+cosmogrpname].attrs.keys()]
        for fieldname in fieldnames:
            cosmodata[fieldname] = hdffile[headergrpname+cosmogrpname].attrs[fieldname]

        # load unit data
        unitgrpname = "Units/"
        fieldnames = [str(n) for n in hdffile[headergrpname+unitgrpname].attrs.keys()]
        for fieldname in fieldnames:
            unitdata[fieldname] = hdffile[headergrpname+unitgrpname].attrs[fieldname]

        # for each snap load the appropriate group
        start = time.clock()
        for i in range(numsnaps):
            snapgrpname = "Snap_%03d/" % (numsnaps-1-i)
            if (iverbose == 1):
                print("Reading ", snapgrpname)
            isnap = hdffile[snapgrpname].attrs["Snapnum"]
            atime[isnap] = hdffile[snapgrpname].attrs["scalefactor"]
            numhalos[isnap] = hdffile[snapgrpname].attrs["NHalos"]
            if (len(desiredfields) > 0):
                fieldnames = desiredfields
            else:
                fieldnames = [str(n) for n in hdffile[snapgrpname].keys()]
            for catvalue in fieldnames:
                halodata[isnap][catvalue] = np.array(hdffile[snapgrpname+catvalue])
        hdffile.close()
        print("read halo data ", time.clock()-start)
    else:
        hdffile = h5py.File(fname+".snap_000.hdf.data", 'r')
        numsnaps = int(hdffile["NSnaps"][0])
        # get field names
        fieldnames = [str(n) for n in hdffile.keys()]
        # clean of header info
        fieldnames.remove("Snapnum")
        fieldnames.remove("NSnaps")
        fieldnames.remove("NHalos")
        fieldnames.remove("TotalNHalos")
        fieldnames.remove("scalefactor")
        if (len(desiredfields) > 0):
            fieldnames = desiredfields
        hdffile.close()
        halodata = [[] for i in range(numsnaps)]
        numhalos = [0 for i in range(numsnaps)]
        atime = [0 for i in range(numsnaps)]
        tree = [[] for i in range(numsnaps)]
        start = time.clock()
        for i in range(numsnaps):
            hdffile = h5py.File(fname+".snap_%03d.hdf.data" % (numsnaps-1-i), 'r')
            atime[i] = (hdffile["scalefactor"])[0]
            numhalos[i] = (hdffile["NHalos"])[0]
            halodata[i] = dict()
            for catvalue in fieldnames:
                halodata[i][catvalue] = np.array(hdffile[catvalue])
            hdffile.close()
        print("read halo data ", time.clock()-start)
        # lets ignore the tree file for now
        for i in range(numsnaps):
            tree[i] = dict()
        return atime, tree, numhalos, halodata, cosmodata, unitdata

    if (icombinedfile == 1):
        hdffile = h5py.File(fname+".tree.hdf.data", 'r')
        treefields = ["haloID", "Num_progen"]
        # to be completed for Progenitor list although information is contained in the
        # halo catalog by searching for things with the same head
        # treefields = ["haloID", "Num_progen", "Progen"]
        for i in range(numsnaps):
            snapgrpname = "Snap_%03d/" % (numsnaps-1-i)
            tree[i] = dict()
            for catvalue in treefields:
                """
                if (catvalue==treefields[-1]):
                    tree[i][catvalue]=[[]for j in range(numhalos[i])]
                    for j in range(numhalos[i]):
                        halogrpname=snapgrpname+"/Halo"+str(j)
                        tree[i][catvalue]=np.array(hdffile[halogrpname+catvalue])
                else:
                    tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue])
                """
                tree[i][catvalue] = np.array(hdffile[snapgrpname+catvalue])
        hdffile.close()
    return atime, tree, numhalos, halodata, cosmodata, unitdata
7efc107d5b6eb8a9747d09108f0e89c0b25bb253
3,654,460
import re


def lines_in_pull(pull):
    """Return a line count for the pull request.

    To consider both added and deleted, we add them together, but discount the
    deleted count, on the theory that adding a line is harder than deleting a
    line (*waves hands very broadly*).
    """
    ignore = r"(/vendor/)|(conf/locale)|(static/fonts)|(test/data/uploads)"
    lines = 0
    files = pull.get_files()
    for f in files:
        if re.search(ignore, f.filename):
            #print("Ignoring file {}".format(f.filename))
            continue
        lines += f.additions + f.deletions//5
    if pull.combinedstate == "merged" and lines > 2000:
        print("*** Large pull: {lines:-6d} lines, {pr.created_at} {pr.number:-4d}: {pr.title}".format(lines=lines, pr=pull))
    return lines
24aabd83c24c3f337f07b50c894f5503eadfc252
3,654,461
import tempfile
import os
import shutil
import errno

import matplotlib.pyplot as plt
import numpy as np


def plot_panels(volume, panels, figsize=(16, 9), save_name=None):
    """Plot on the same figure a number of views, as defined by a list of panels.

    Parameters
    ----------
    volume : cortex.Volume
        The data to plot.
    panels : list of dict
        List of parameters for each panel. An example of panel is:
            {
                'extent': [0.000, 0.000, 0.300, 0.300],
                'view': {
                    'hemisphere': 'left',
                    'angle': 'lateral_pivot',
                    'surface': 'inflated',
                }
            }
        The `extent` and `zoom` entries are ordered as
        [left, bottom, width, height] with values in [0, 1].
    figsize : tuple of float
        Size of the figure.
    save_name : str or None
        Name of the file where the figure is saved. None to not save.
        Can end with different extensions, such as '.png' or '.pdf'.

    Returns
    -------
    fig : matplotlib.Figure
        Created figure. Can be used for instance for custom save functions.

    Example
    -------
    >>> from cortex.export import plot_panels, params_flatmap_lateral_medial
    >>> plot_panels(volume, **params_flatmap_lateral_medial)
    """
    # list of couple of angles and surfaces
    angles_and_surfaces = [(panel['view']['angle'], panel['view']['surface'])
                           for panel in panels]
    # remove redundant couples, e.g. left and right
    angles_and_surfaces = list(set(angles_and_surfaces))
    list_angles, list_surfaces = list(zip(*angles_and_surfaces))

    # create all images
    temp_dir = tempfile.mkdtemp()
    base_name = os.path.join(temp_dir, 'fig')
    filenames = save_3d_views(volume, base_name, list_angles=list_angles,
                              list_surfaces=list_surfaces, trim=True,
                              size=(1600 * 4, 900 * 4))

    fig = plt.figure(figsize=figsize)
    for panel in panels:
        # load image
        angle_and_surface = (panel['view']['angle'], panel['view']['surface'])
        index = angles_and_surfaces.index(angle_and_surface)
        image = plt.imread(filenames[index])

        # chose hemisphere
        if 'hemisphere' in panel['view']:
            left, right = np.split(image, [image.shape[1] // 2], axis=1)
            if panel['view']['hemisphere'] == 'left':
                image = left
            else:
                image = right

        # trim white borders
        image = image[image.sum(axis=1).sum(axis=1) > 0]
        image = image[:, image.sum(axis=0).sum(axis=1) > 0]

        # zoom
        if 'zoom' in panel['view']:
            left, bottom, width, height = panel['view']['zoom']
            left = int(left * image.shape[1])
            width = int(width * image.shape[1])
            bottom = int(bottom * image.shape[0])
            height = int(height * image.shape[0])
            image = image[bottom:bottom + height]
            image = image[:, left:left + width]

        # add ax and image
        ax = plt.axes(panel['extent'])
        ax.axis('off')
        ax.imshow(image)

    # note that you might get a slightly different layout with `plt.show()`
    # since it might use a different backend
    if save_name is not None:
        fig.savefig(save_name, bbox_inches='tight', dpi=100)

    # delete temporary directory
    try:
        shutil.rmtree(temp_dir)
    except OSError as e:
        # reraise if the directory has not already been deleted
        if e.errno != errno.ENOENT:
            raise

    return fig
bbd3b612ea3e10f47b94e0cb4588493052568d16
3,654,462
def get_active_milestones(session, project):
    """Returns the list of all the active milestones for a given project."""
    query = (
        session.query(model.Issue.milestone)
        .filter(model.Issue.project_id == project.id)
        .filter(model.Issue.status == "Open")
        .filter(model.Issue.milestone.isnot(None))
    )
    return sorted([item[0] for item in query.distinct()])
8a4c23ada7b18796ea76c770033320f29c0e8d5d
3,654,463
from picamera import PiCamera
def set_camera_parameters(cfg):
    """
    Set camera parameters.

    All values come from the dict generated from the JSON file.

    :param cfg: configuration dict loaded from the JSON file.
    :type cfg: dict
    :return: configured camera instance
    :rtype: picamera.PiCamera
    """
    # set camera resolution [width x height]
    camera = PiCamera()
    camera.resolution = cfg["stream"]["resolution"]
    # set camera frame rate [Hz]
    camera.framerate = cfg["stream"]["framerate"]
    # exposure mode
    camera.exposure_mode = cfg["exposure"]["mode"]
    if cfg["exposure"]["set_iso"]:
        camera.iso = cfg["exposure"]["iso"]
    return camera
3bd7b0b410d7a19f486a8e3fc80d50af4caa1734
3,654,464
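The keys below are exactly the ones set_camera_parameters reads from the config dict; the concrete values are illustrative only, and actually running it requires a Raspberry Pi with the picamera package installed:

example_cfg = {
    "stream": {
        "resolution": (1280, 720),   # width x height
        "framerate": 30,             # Hz
    },
    "exposure": {
        "mode": "auto",
        "set_iso": True,             # only pin the ISO when True
        "iso": 200,
    },
}
# camera = set_camera_parameters(example_cfg)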
def get_srl_result_for_instance(srl_dict, instance):
    """Get SRL output for an instance."""
    sent_id = instance.sent_id
    tokens_gold = instance.tokens
    srl_output = srl_dict[sent_id]
    srl_output["words"] = [word for word in srl_output["words"] if word != "\\"]
    tokens_srl = srl_output['words']
    if tokens_srl != tokens_gold:
        srl2gold_id_map = get_gold_map(tokens_srl, tokens_gold)
    else:
        srl2gold_id_map = {i: i for i in range(len(tokens_srl))}
    return srl_output, srl2gold_id_map
4437e68817469966d70759bf038b68c6b5983745
3,654,465
import sys
def ensure_tty(file=sys.stdout):
    """ Ensure a file object is a tty.

        It must have an `isatty` method that returns True.
        TypeError is raised if the method doesn't exist, or returns False.
    """
    isatty = getattr(file, 'isatty', None)
    if isatty is None:
        raise TypeError(
            'Cannot detect tty, file has no `isatty` method: {}'.format(
                getattr(file, 'name', type(file).__name__)
            )
        )
    if not isatty():
        raise TypeError(
            'This will not work, file object is not a tty: {}'.format(
                getattr(file, 'name', type(file).__name__)
            )
        )
    return True
52981903549b5241c22073df94b39db3eb4e3271
3,654,466
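A quick illustration of both failure modes; io.StringIO has an isatty() method that returns False, while a bare object has no isatty at all:

import io
try:
    ensure_tty(io.StringIO())
except TypeError as exc:
    print(exc)   # This will not work, file object is not a tty: StringIO
try:
    ensure_tty(object())
except TypeError as exc:
    print(exc)   # Cannot detect tty, file has no `isatty` method: object
# ensure_tty(sys.stdout) returns True when stdout is attached to a terminal.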
from typing import List
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
    """Break text into equal (cell) length strings."""
    _get_character_cell_size = get_character_cell_size
    characters = [
        (character, _get_character_cell_size(character)) for character in text
    ][::-1]
    total_size = position
    lines: List[List[str]] = [[]]
    append = lines[-1].append
    pop = characters.pop
    while characters:
        character, size = pop()
        if total_size + size > max_size:
            lines.append([character])
            append = lines[-1].append
            total_size = size
        else:
            total_size += size
            append(character)
    return ["".join(line) for line in lines]
d8d0bd558b48a43775aed3cb5e15a3889fdc653d
3,654,467
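chop_cells relies on a get_character_cell_size helper that is not part of this snippet; the stand-in below treats every character as one cell wide, purely to exercise the splitting logic (real cell widths differ for wide and zero-width characters):

get_character_cell_size = lambda character: 1   # stand-in: every character is 1 cell
print(chop_cells("hello world", max_size=4))
# ['hell', 'o wo', 'rld']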
def read_input_field_lonlat( input_file, fld_name, level, conf_in, *, conv_fact=None, crop=0, ): """Read from file and pre-process a field. Returns the field as a Field2D object. Arguments: - input_file: Input netCDF file. - fld_name: Name of the input field used in the input file. - conf_in: Input configuration. Optional arguments: - conv_fact: Conversion factor applied to the field. - crop: cut N pixels off around the domain """ lon, lat = read_lonlat2d( infile=conf_in["infile_lonlat"], name_lon=conf_in["lonlat_names"][0], name_lat=conf_in["lonlat_names"][1], transpose2d=conf_in["infield_transpose"], reduce_grid_res=conf_in["reduce_grid_resolution"], reduce_grid_stride=conf_in["reduce_grid_stride"], ) # Read the raw field from file try: with nc4.Dataset(input_file, "r") as fi: # Strip leading time dimension fld_raw = fi[fld_name][0].astype(np.float32) except Exception as e: err = "Cannot read '{}' from {}\n{}: {}".format( fld_name, input_file, e.__class__.__name__, str(e).strip() ) raise IOError(err) if conf_in["infield_transpose"]: fld_raw = fld_raw.T # SR_TMP < assert lon.shape == fld_raw.shape # SR_TMP > # Shrink domain if crop is not None and crop > 0: fld_raw = fld_raw[crop:-crop, crop:-crop] lon = lon[crop:-crop, crop:-crop] lat = lat[crop:-crop, crop:-crop] # Select level if level is not None: fld_raw = fld_raw[level, :, :] # Apply a conversion factor if conv_fact is not None: fld_raw *= conv_fact # Create a Field2D object fld = Field2D(fld_raw, lon, lat) return fld
2ad31cee8ea26abcb7982fc4f5a9518dd11872c4
3,654,468
def multiply_scenarios(sep, *args):
    """
    Create the cross product of the given lists of scenarios
    """
    result = None
    for scenes in args:
        if result is None:
            result = scenes
        else:
            total = []
            for scena in result:
                for scenb in scenes:
                    # Create a merged scenario with a concatenated name
                    name = scena[0] + sep + scenb[0]
                    tdict = {}
                    tdict.update(scena[1])
                    tdict.update(scenb[1])

                    # If there is a 'P' value, it represents the
                    # probability that we want to use this scenario
                    # If both scenarios list a probability, multiply them.
                    if 'P' in scena[1] and 'P' in scenb[1]:
                        P = scena[1]['P'] * scenb[1]['P']
                        tdict['P'] = P

                    total.append((name, tdict))
            result = total
    return check_scenarios(result)
ef44d9cfcd01304be2d56215caea676dfc26d01b
3,654,469
def export_output():
    """
    Returns a function that will return the contents of the first file in a zip file
    which is not named '_metadata.csv'
    """
    def fn(export: FlexibleDataExport):
        out = BytesIO()
        export.file_format = FileFormat.ZIP_CSV
        export.write_data(out)
        with ZipFile(out, 'r') as zipfile:
            names = [name for name in zipfile.namelist() if name != '_metadata.csv']
            with zipfile.open(names[0], 'r') as infile:
                return infile.read().decode('utf-8')

    yield fn
dd94d996e72d01c287d8a1b57979d47b89e6a207
3,654,470
from tensorflow.keras import backend as K  # assumed import: K conventionally refers to the Keras backend
def compute_total_probability_vector(mix_coeff_matrix, kernel_probability_matrix):
    """
    Computes the total, weighted probability vector using the mixture coefficient
    matrix and the kernel probability matrix.
    """
    # Weight each kernel probability by its mixture coefficient, then sum over the
    # mixture components (axis=1), keeping a column-vector shape.
    total_probability_vector = K.sum(mix_coeff_matrix * kernel_probability_matrix,
                                     axis=1, keepdims=True)
    return total_probability_vector
9c9d97dd8d7c83be02bb91a9924994c36700cbd8
3,654,471
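The same reduction written with plain NumPy to make the shapes explicit (2 samples, 3 mixture components); this is only a sketch of what the Keras-backend call computes:

import numpy as np
mix = np.array([[0.2, 0.3, 0.5],
                [0.6, 0.1, 0.3]])        # mixture coefficients, one row per sample
kernel = np.array([[0.10, 0.20, 0.40],
                   [0.05, 0.50, 0.20]])  # per-kernel probabilities
total = np.sum(mix * kernel, axis=1, keepdims=True)
print(total)   # approximately [[0.28], [0.14]], shape (2, 1)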
def mnist_noniid(dataset, num_users):
    """
    Sample non-I.I.D client data from MNIST dataset
    :param dataset:
    :param num_users:
    :return:
    """
    num_shards, num_imgs = 200, 300
    idx_shard = [i for i in range(num_shards)]
    dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
    idxs = np.arange(num_shards * num_imgs)
    labels = dataset.train_labels.numpy()

    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]

    # divide and assign
    for i in range(num_users):
        rand_set = set(np.random.choice(idx_shard, 2, replace=False))
        idx_shard = list(set(idx_shard) - rand_set)
        for rand in rand_set:
            dict_users[i] = np.concatenate((dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
    return dict_users
8194cf27698d9e721f739ed405f56c8fddbe581a
3,654,472
def first_order_model(nt, rates):
    """
    Returns the first-order model asymptotic solution for a network nt.
    Takes a list of interaction weights (in the same order as the list of nodes)
    as the "rates" argument
    """
    if type(nt) == list:
        nt = az.transform(nt)
        M = network_matrix(nt, rates=rates)
    elif type(nt) == np.ndarray:
        M = nt
        nt = None
    else:
        M = network_matrix(nt, rates=rates)
    L, V = np.linalg.eig(M)
    kmax = np.real(L).argmax()
    return (np.real(V[:, kmax]) / np.real(V[:, kmax]).sum(), az.transform(nt))
8348e68568d3fb9f5236a1e7852f2b1cb8c2860d
3,654,473
import requests
def save_to_disk(url, save_path):
    """ Saves to disk non-destructively (xb option will not overwrite) """
    print('Downloading: %s' % url)
    r = requests.get(url)
    if r.status_code == 404:
        print('URL broken, unable to download: %s' % url)
        return False
    else:
        with open(save_path, 'xb') as f:
            f.write(r.content)
        return True
c9917a637026d999765364d3c276150681554129
3,654,474
import argparse def arguments(): """Parse arguments. Returns ------- argparse.Namespace Returns Argparse Namespace. """ parser = argparse.ArgumentParser(prog='pyslackdesc', description="pyslackdesc - simple, \ interactive script to generate \ Slack-desc files", epilog="Have fun!") parser.add_argument("-i", "--interactive", default=False, help="run script in interactive mode", action="store_true") parser.add_argument("-o", "--output", default='slack-desc', metavar='filename', help="output file (default is slack-desc)") parser.add_argument("-v", "--verbose", help="show generated file", action="store_true", default=False) parser.add_argument("-V", "--version", action='version', version='%(prog)s ' '{version}'.format(version=__version__)) # Add group cmd_parser = parser.add_argument_group('commandline mode') cmd_parser.add_argument("-n", "--name", nargs=1, metavar='name', type=str, help="program name (single word)") cmd_parser.add_argument("-s", "--short", nargs='+', metavar='"short description"', type=str, help="program short description (one line)") cmd_parser.add_argument("-d", "--description", nargs='+', metavar='"long description"', help="program long description (up to 6 lines)") cmd_parser.add_argument("-u", "--url", nargs=1, metavar='url', help="program URL") args = parser.parse_args() return args
4165c2e97ffa6705941c2dd9aeb006cfc567846c
3,654,475
def render_settings_window(s_called, s_int, ntfc_called, ntfc_state, s_state):
    """ Render the settings window """
    win = Settings(s_called, s_int, ntfc_called, ntfc_state, s_state)
    win.connect("delete-event", Gtk.main_quit)
    win.show_all()
    Gtk.main()
    return win.settings_called, win.interval, win.notifications_called, win.notifications_state
30f5a64b822d408b4f9ca4d83047753fa55eaa58
3,654,476
import json
def server(server_id):
    """ Return JSON-encoded info and the current player list for a single game server """
    data = {}
    db_server = ServerModel.select().join(IPModel)
    db_server = db_server.where(ServerModel.id == server_id).get()
    server_address = (db_server.ip.address, db_server.port)
    info = {}
    try:
        querier = ServerQuerier(server_address, 1)
        info = querier.get_info()
    except NoResponseError:
        pass
    players = []
    try:
        players = querier.get_players()["players"]
    except BrokenMessageError:
        pass
    except NoResponseError:
        pass
    data["id"] = db_server.id
    for key in info:
        data[key] = str(info[key])
    data["players"] = []
    for player in players:
        player_data = {}
        for key in player:
            if type(player[key]) == str:
                player_data[key] = player[key].encode('utf8')
                continue
            player_data[key] = player[key]
        data["players"].append(player_data)
    return json.dumps(data)
a52fd4bbaefefff5e667dd1dc1b06f68b7643810
3,654,477
def atom_hsoc(case, soc): """ Return atomic spin-orbit coupling matrix :math:`\\vec{l}\cdot\\vec{s}` in complex spherical harmonics basis. Parameters ---------- case : str String label indicating atomic shell, - 'p': for :math:`p` -shell. - 't2g': for :math:`t_{2g}` -shell. - 'd': for :math:`d` -shell. - 'f': for :math:`f` -shell. soc : float The strength of spin-orbit coupling. Returns ------- hsoc : 2d complex array The spin-orbit coupling matrix. """ sqrt2 = np.sqrt(2.0) sqrt6 = np.sqrt(6.0) sqrt10 = np.sqrt(10.0) sqrt12 = np.sqrt(12.0) if case.strip() == 'p': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * soc * hsoc elif case.strip() == 't2g': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * -soc * hsoc elif case.strip() == 'd': hsoc = np.zeros((10, 10), dtype=np.complex128) hsoc[0,0] = -2.0 hsoc[3,0] = 2.0 hsoc[1,1] = 2.0 hsoc[2,2] = -1.0 hsoc[5,2] = sqrt6 hsoc[0,3] = 2.0 hsoc[3,3] = 1.0 hsoc[7,4] = sqrt6 hsoc[2,5] = sqrt6 hsoc[6,6] = 1.0 hsoc[9,6] = 2.0 hsoc[4,7] = sqrt6 hsoc[7,7] = -1.0 hsoc[8,8] = 2.0 hsoc[6,9] = 2.0 hsoc[9,9] = -2.0 return 0.5 * soc * hsoc elif case.strip() == 'f': hsoc = np.zeros((14, 14), dtype=np.complex128) hsoc[0,0 ] = -3.0 hsoc[3,0 ] = sqrt6 hsoc[1,1 ] = 3.0 hsoc[2,2 ] = -2.0 hsoc[5,2 ] = sqrt10 hsoc[0,3 ] = sqrt6 hsoc[3,3 ] = 2.0 hsoc[4,4 ] = -1.0 hsoc[7,4 ] = sqrt12 hsoc[2,5 ] = sqrt10 hsoc[5,5 ] = 1.0 hsoc[9,6 ] = sqrt12 hsoc[4,7 ] = sqrt12 hsoc[8,8 ] = 1.0 hsoc[11,8 ] = sqrt10 hsoc[6,9 ] = sqrt12 hsoc[9,9 ] = -1.0 hsoc[10,10] = 2.0 hsoc[13,10] = sqrt6 hsoc[8,11 ] = sqrt10 hsoc[11,11] = -2.0 hsoc[12,12] = 3.0 hsoc[10,13] = sqrt6 hsoc[13,13] = -3.0 return 0.5 * soc * hsoc else: print("don't support SOC for this case: ", case) return
d1c87105831952746e7b089480058b38c382bcd5
3,654,478
def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_frame_mappings >>> with custom_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError("Could not determine celestial frame corresponding to " "the specified WCS object")
74f798f0f19566acf9f2115edf47ee2cf262ca0b
3,654,479
def conv2d(x, f=64, k=3, d=1, act=None, pad='SAME', name='conv2d'):
    """
    :param x: input
    :param f: filters, default 64
    :param k: kernel size, default 3
    :param d: strides, default 1
    :param act: activation function, default None
    :param pad: padding (valid or same), default same
    :param name: scope name, default conv2d
    :return: conv2d net
    """
    return tf.layers.conv2d(x,
                            filters=f, kernel_size=k, strides=d,
                            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),
                            bias_initializer=tf.zeros_initializer(),
                            activation=act,
                            padding=pad,
                            name=name)
86e2b6b9ac21074da460ee2785ef6fca317e0417
3,654,480
def _is_uniform_distributed_cf(cf):
    """
    Check if the provided center frequencies are uniformly distributed.
    """
    # Spacing is uniform exactly when the second difference is zero everywhere.
    return not np.any(np.diff(np.diff(cf)) != 0)
c8cee1832ff4664839a0adc1263f3ece94673ad7
3,654,481
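Two quick spot checks of the uniformity test: linearly spaced center frequencies have a constant first difference, octave-spaced ones do not:

import numpy as np
print(_is_uniform_distributed_cf(np.array([100.0, 200.0, 300.0, 400.0])))    # True
print(_is_uniform_distributed_cf(np.array([125.0, 250.0, 500.0, 1000.0])))   # False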
def build_person(first_name, last_name):
    """Return a dictionary of information about a person."""
    person = {'first': first_name, 'last': last_name}
    return person
c8da8a5c4d4b7403804eff55e38106bb5921cf06
3,654,482
def radon(image, theta=None): """ Calculates the radon transform of an image given specified projection angles. Parameters ---------- image : array_like, dtype=float Input image. theta : array_like, dtype=float, optional (default np.arange(180)) Projection angles (in degrees). Returns ------- output : ndarray Radon transform (sinogram). """ if image.ndim != 2: raise ValueError('The input image must be 2-D') if theta is None: theta = np.arange(180) height, width = image.shape diagonal = np.sqrt(height**2 + width**2) heightpad = np.ceil(diagonal - height) widthpad = np.ceil(diagonal - width) padded_image = np.zeros((int(height + heightpad), int(width + widthpad))) y0, y1 = int(np.ceil(heightpad / 2)), \ int((np.ceil(heightpad / 2) + height)) x0, x1 = int((np.ceil(widthpad / 2))), \ int((np.ceil(widthpad / 2) + width)) padded_image[y0:y1, x0:x1] = image out = np.zeros((max(padded_image.shape), len(theta))) h, w = padded_image.shape dh, dw = h // 2, w // 2 shift0 = np.array([[1, 0, -dw], [0, 1, -dh], [0, 0, 1]]) shift1 = np.array([[1, 0, dw], [0, 1, dh], [0, 0, 1]]) def build_rotation(theta): T = -np.deg2rad(theta) R = np.array([[np.cos(T), -np.sin(T), 0], [np.sin(T), np.cos(T), 0], [0, 0, 1]]) return shift1.dot(R).dot(shift0) for i in range(len(theta)): rotated = homography(padded_image, build_rotation(-theta[i])) out[:, i] = rotated.sum(0)[::-1] return out
9395e742353def0db9fa26e955d80c31a0c84d55
3,654,483
def build_idrac_table_schemas(metric_definitions: list): """build_table_schemas Build iDRAC Table Schemas Build table schemas based on the idrac telemetry metric definitions Args: metric_definitions (list): idrac telemetry metric definitions Returns: dict: iDRAC table schemas """ table_schemas = {} try: for metric in metric_definitions: table_name = metric['Id'] metric_type = metric['MetricDataType'] metric_unit = metric.get('Units', None) # For network metrics, use BIG INT for storing the metric readings if metric_unit == 'By' or metric_unit == 'Pkt': value_type = 'BIGINT' else: value_type = utils.data_type_mapping.get(metric_type, 'TEXT') column_names = ['Timestamp', 'NodeID', 'Source', 'FQDD', 'Value'] column_types = ['TIMESTAMPTZ NOT NULL', 'INT NOT NULL', 'TEXT', \ 'TEXT', value_type] table_schemas.update({ table_name: { 'column_names': column_names, 'column_types': column_types, } }) except Exception as err: log.error(f"Cannot build idrac table schemas: {err}") return table_schemas
5f7b6b5807f009d56b1f2aabeb86d0ddfcbdf44f
3,654,484
from typing import Tuple
def _increasing_randomly_negate_to_arg(
    level: int, params: Tuple[float, float]
) -> Tuple[float]:
    """
    Convert level to transform magnitude. This assumes transform magnitude increases
    (or decreases with 50% chance) linearly with level.

    Args:
        level (int): Level value.
        params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum increase in transform magnitude
            when level is at maximum.
    """
    magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
    return (params[0] + _randomly_negate(magnitude),)
a1e9cc220753132cfeb1426967d2cd648bc78fa8
3,654,485
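A deterministic walk-through of the magnitude formula; _AUGMENTATION_MAX_LEVEL and _randomly_negate are module-level helpers not shown here, so the values below are stand-ins chosen only to make the arithmetic visible (negation disabled):

_AUGMENTATION_MAX_LEVEL = 10                     # assumed maximum level
_randomly_negate = lambda magnitude: magnitude   # stand-in: never negate
# level 5 of 10, base 1.0, maximum increase 2.0 -> 1.0 + (5 / 10) * 2.0 = 2.0
print(_increasing_randomly_negate_to_arg(5, (1.0, 2.0)))   # (2.0,)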
import json
import hashlib
def hashify(params, max_length=8):
    """
    Create a short hashed string of the given parameters.

    :param params: A dictionary of key, value pairs for parameters.
    :param max_length: [optional] The maximum length of the hashed string.
    """
    param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
    param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
    return param_hash[:max_length]
e4a97a28fc2d0564da3e6b22f32735b4a2534c3e
3,654,486
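hashify is pure standard library, so a direct usage example works as written; because the keys are sorted before serialisation, the hash does not depend on dict ordering:

params = {'learning_rate': 0.01, 'layers': 3}
print(hashify(params))                                                    # 8-character md5 prefix
print(hashify(params) == hashify({'layers': 3, 'learning_rate': 0.01}))   # True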
import os
import io
import re
def version(package, encoding='utf-8'):
    """Obtain the package version from a python file e.g. pkg/__init__.py

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    path = os.path.join(os.path.dirname(__file__), package, '__init__.py')
    with io.open(path, encoding=encoding) as fp:
        version_info = fp.read()
    version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
                              version_info, re.M)
    if not version_match:
        raise RuntimeError("Unable to find version string.")
    return version_match.group(1)
6066b042a698d0ee2b816573a144c4dc5ac47a45
3,654,487
def unique_entries(results):
    """Prune non-unique search results."""
    seen = set()
    clean_results = []
    for i in results:
        if i['code'] not in seen:
            clean_results.append(i)
            seen.add(i['code'])
    return clean_results
c0c55ebd5aa76f3a7f44134a972019c3d26c1c48
3,654,488
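A minimal run of the deduplication, which keeps the first occurrence of each 'code':

results = [
    {'code': 'US', 'name': 'United States'},
    {'code': 'FR', 'name': 'France'},
    {'code': 'US', 'name': 'USA (duplicate)'},
]
print(unique_entries(results))
# [{'code': 'US', 'name': 'United States'}, {'code': 'FR', 'name': 'France'}]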
def get_q_confidence() -> int:
    """Gets the user's confidence for the card"""
    response = input("How confident do you feel about being able to answer this question (from 1-10)? ")
    if response.isnumeric() and 0 < int(response) <= 10:
        return int(response)
    else:
        print("Incorrect score value, please enter a number from 1 to 10.")
        # we call the function until it returns the appropriate value
        return get_q_confidence()
e61ceb5676703a795a24f99ee7849a362186ec84
3,654,489
def generate_offices_table(offices, by_office, by_polling_center, election_day, day_after_election_day): """ Pre-compute key data needed for generating election day office reports. """ offices_by_key = {str(office['code']): office for office in offices} rows = [] for key in sorted([key for key in by_office.keys()]): row = by_office[key] key = str(key) # copy name from the offices hash array row['english_name'] = offices_by_key[key]['english_name'] row['arabic_name'] = offices_by_key[key]['arabic_name'] on_election_day = row.get(election_day, {}) # get election day numbers row['opened'] = on_election_day.get('opened', 0) row['votes_reported_1'] = on_election_day.get('1', 0) row['votes_reported_2'] = on_election_day.get('2', 0) row['votes_reported_3'] = on_election_day.get('3', 0) # and aggregate counts row['reported_1'] = on_election_day.get('1_count', 0) row['reported_2'] = on_election_day.get('2_count', 0) row['reported_3'] = on_election_day.get('3_count', 0) # check for late results # We only want late reports for period 4. The JSON data has aggregate # numbers for office by day, but you can't tell which of those values are new reports on # EDAY+1 and which ones are replacements for values given on EDAY, so we have to iterate # through each center to get that info row['votes_reported_4'] = 0 reported_4 = 0 # Which polling centers are in this office? centers = {k: v for k, v in by_polling_center.items() if str(v['office_id']) == key} for center_id, center in centers.items(): if day_after_election_day in center and '4' in center[day_after_election_day]: # found a period 4 report on EDAY+1. Sum the votes and increment the report count row['votes_reported_4'] += center[day_after_election_day]['4'] reported_4 += 1 elif election_day in center and '4' in center[election_day]: # didn't find an EDAY+1 report, so use EDAY, if present row['votes_reported_4'] += center[election_day]['4'] reported_4 += 1 row['reported_4'] = reported_4 # save derived values row['not_opened'] = row['polling_center_count'] - row['opened'] row['not_reported_1'] = row['polling_center_count'] - row['reported_1'] row['not_reported_2'] = row['polling_center_count'] - row['reported_2'] row['not_reported_3'] = row['polling_center_count'] - row['reported_3'] row['not_reported_4'] = row['polling_center_count'] - reported_4 row['closed'] = reported_4 # reporting final tally means center closed rows.append(row) return rows
85111ed67e8f6b8dce71af2844ee865699f3fe01
3,654,490
import time import random import select def bang(nick, chan, message, db, conn, notice): """when there is a duck on the loose use this command to shoot it.""" global game_status, scripters if chan in opt_out: return network = conn.name score = "" out = "" miss = ["You just shot yourself in the foot, the duck laughed at you as it flew off.", "WHOOSH! You missed the duck completely!", "Your gun jammed!", "Better luck next time.", "Your barrel must be bent lol, maybe next time!", "Clearly you're using a BB gun, get a real gun and try again!", "Did you just throw a firecracker? Go buy a shotgun and come back!","Wow, Could you be a worse shot?" ] if not game_status[network][chan]['game_on']: return "There is no activehunt right now. Use @starthunt to start a game." elif game_status[network][chan]['duck_status'] != 1: if game_status[network][chan]['no_duck_kick'] == 1: out = "KICK {} {} The last duck was already nabbed, try again with the next duck.".format(chan, nick) conn.send(out) return return "The last duck was already nabbed, try again with the next duck." else: game_status[network][chan]['shoot_time'] = time() deploy = game_status[network][chan]['duck_time'] shoot = game_status[network][chan]['shoot_time'] if nick.lower() in scripters: if scripters[nick.lower()] > shoot: notice("You are in a cool down period, you can try again in {} seconds.".format(str(scripters[nick.lower()] - shoot))) return chance = hit_or_miss(deploy, shoot) if not random.random() <= chance and chance > .05: out = random.choice(miss) + " You can try again in 3 seconds." scripters[nick.lower()] = shoot + 3 return out if chance == .05: out += "You pulled the trigger in {} seconds, that's mighty fast. Are you running a script for this game? Take a 2 hour cool down.".format(str(shoot - deploy)) scripters[nick.lower()] = shoot + 7200 if not random.random() <= chance: return random.choice(miss) + " " + out else: message(out) game_status[network][chan]['duck_status'] = 2 score = db.execute(select([table.c.shot]) \ .where(table.c.network == conn.name) \ .where(table.c.chan == chan.lower()) \ .where(table.c.name == nick.lower())).fetchone() if score: score = score[0] score += 1 dbupdate(nick, chan, db, conn, score, 0) else: score = 1 dbadd_entry(nick, chan, db, conn, score, 0) timer = "{:.3f}".format(shoot - deploy) duck = "duck" if score == 1 else "ducks" message("{} Perfect aim, you shot the duck in {} seconds! You have killed {} {} in {}.".format(nick, timer, score, duck, chan)) set_ducktime(chan, conn)
78e537caa4c2579226bfbb870a1e37cacd58279e
3,654,491
def pfunc_role_coverage(args):
    """Another intermediate function for parallelization; as for pfunc_doctor_banding."""
    rota = args[0]
    role = args[1]
    return rota.get_role_coverage(role)
043ce250b428d443de90c7aa5fa8e8dcc2869303
3,654,492
def parse(s: str) -> Tree:
    """
    Parse PENMAN-notation string *s* into its tree structure.

    Args:
        s: a string containing a single PENMAN-serialized graph
    Returns:
        The tree structure described by *s*.
    Example:
        >>> import penman
        >>> penman.parse('(b / bark-01 :ARG0 (d / dog))')  # noqa
        Tree(('b', [('/', 'bark-01'), (':ARG0', ('d', [('/', 'dog')]))]))
    """
    tokens = lex(s, pattern=PENMAN_RE)
    return _parse(tokens)
2a309be1e2a4d8c63130120f9497464811cc6e91
3,654,493
def subtract(v: Vector, w: Vector) -> Vector:
    """Subtracts corresponding elements"""
    assert len(v) == len(w), "vectors must be the same length"
    return [v_i - w_i for v_i, w_i in zip(v, w)]
6e81286b28a178981d970630104ac23bfc606e67
3,654,494
import os def get_QUTFish(image_path, train_ratio=0.8): """ get train and test dataset of QUTFish: https://wiki.qut.edu.au/display/cyphy/Fish+Dataset step1: download the dataset step2: set the root to QUT_fish_data/ :param image_path: the QUT_fish_data/ :param the percentage used for training :return: """ # if the images has been scanned before then just load train_images_file = 'data/QUTFish_train_images.npy' train_labels_file = 'data/QUTFish_train_labels.npy' test_images_file = 'data/QUTFish_test_images.npy' test_labels_file = 'data/QUTFish_test_labels.npy' if os.path.exists(train_images_file): print('Found pre-generated train/test lists!') images_train = np.load(train_images_file) labels_train = np.load(train_labels_file) images_val = np.load(test_images_file) labels_val = np.load(test_labels_file) images_train, labels_train = shuffle(images_train, labels_train) return images_train, labels_train, images_val, labels_val # scan the image folder to get the train and test image/label list images = [] labels = [] label_id = 0 # read label and image file list from final_all_index.txt # line format: 1=A73EGS~P=controlled=A73EGS~P_7=7s images_tmp = [] current_class = None with open(os.path.join(image_path, "final_all_index.txt")) as f: for line in f: names = line.split('=') if names[2] != 'insitu': continue if not os.path.exists(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')): continue # print(names) if current_class is None: current_class = int(names[0]) images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) else: if current_class == int(names[0]): images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) else: if len(images_tmp) > 10: # only save class has >10 images # append this class to dataset labels_tmp = np.ones(len(images_tmp))*label_id images.extend(images_tmp) labels.extend(labels_tmp.astype(np.int8).tolist()) label_id += 1 print('Dataset [QUTFish]: #class=%s, #sample=%s' % (label_id, len(images_tmp))) # move on to next class current_class = int(names[0]) images_tmp = [] images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) print('QUT: #classes: ', label_id, ', #images: ', len(images)) images_train, labels_train, images_val, labels_val = train_val_split(images, labels, train_ratio) # save the indexes to files np.save(train_images_file, np.asarray(images_train)) np.save(train_labels_file, np.asarray(labels_train)) np.save(test_images_file, np.asarray(images_val)) np.save(test_labels_file, np.asarray(labels_val)) # random shuffle images_train, labels_train = shuffle(images_train, labels_train) return images_train, labels_train, images_val, labels_val
728d23d47a5e81745ac707a2318e51b7d0ad42ed
3,654,495
def getWordScore(word, n):
    """
    Returns the score for a word. Assumes the word is a valid word.

    The score for a word is the sum of the points for letters in the
    word, multiplied by the length of the word, PLUS 50 points if all n
    letters are used on the first turn.

    Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
    worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    result = 0
    bonus = 0
    if len(word) == n:
        bonus = 50
    for letter in word:
        result += SCRABBLE_LETTER_VALUES[letter]
    result *= len(word)
    result += bonus
    return result
610ed561edf246cef2bfd9f6cc5e38904bb939ec
3,654,496
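A worked example of the scoring rule; SCRABBLE_LETTER_VALUES is defined elsewhere in the original exercise, so only the letters needed here are filled in as an assumption:

SCRABBLE_LETTER_VALUES = {'b': 3, 'd': 2, 'e': 1, 'i': 1, 'o': 1, 'r': 1, 'u': 1, 'w': 4}
print(getWordScore('word', 7))     # (4 + 1 + 1 + 2) * 4 = 32, no bonus
print(getWordScore('buried', 6))   # (3 + 1 + 1 + 1 + 1 + 2) * 6 + 50 = 104, all 6 letters used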
def get_commit():
    """
    Try to return the intended commit / release to deal with. Otherwise raise
    an acceptable error.

    1) it was specified on the command line
    2) use the current branch in the target repo
    """
    commit = getattr(env, 'commit', None) or rev_parse('HEAD')
    if commit is None:
        raise RuntimeError(
            'Unable to ascertain target commit from command line or git repo')
    return commit
90af53491335a7c616dc7a070394ec7408b7be52
3,654,497
def deg2hms(x):
    """Transform degrees to *hours:minutes:seconds* strings.

    Parameters
    ----------
    x : float
        The degree value in [0, 360) to be written as a sexagesimal string.

    Returns
    -------
    out : str
        The input angle written as a sexagesimal string, in the form,
        hours:minutes:seconds.

    """
    if not 0.0 <= x < 360.0:
        raise ValueError("Bad RA value in degrees")
    _h = np.floor(x * 12.0 / 180.0)
    _m = np.floor((x * 12.0 / 180.0 - _h) * 60.0)
    _s = ((x * 12.0 / 180.0 - _h) * 60.0 - _m) * 60.0
    hms = f"{_h:02.0f}:{_m:02.0f}:{_s:07.4f}"
    return hms
6572020a71d3abaac42c8826c6248c648535c3a9
3,654,498
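Two spot checks of the conversion (1 hour of right ascension corresponds to 15 degrees); the inputs are chosen to be exactly representable so the output strings are stable:

import numpy as np
print(deg2hms(180.0))   # 12:00:00.0000
print(deg2hms(202.5))   # 13:30:00.0000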
def normalise_whitespace(row):
    """Return table row with normalised white space.

    This involves stripping leading and trailing whitespace, as well as
    consolidating white space to single spaces.
    """
    pairs = (
        (k, _normalise_cell(v))
        for k, v in row.items())
    return {
        k: v for k, v in pairs
        if not isinstance(v, str) or v}
10a580ef43c1cc47efc709fff05abd98bb332bcf
3,654,499