content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def e() -> ProcessBuilder:
    """
    Euler's number (e)

    :return: The numerical value of Euler's number.
    """
    return process('e')
5,354,800
def get_or_create_anonymous_cart_from_token(token, cart_queryset=Cart.objects.all()):
    """Returns open anonymous cart with given token or creates new.

    :type cart_queryset: saleor.cart.models.CartQueryset
    :type token: string
    :rtype: Cart
    """
    return cart_queryset.open().filter(token=token, user=None).get_or_create(
        defaults={'user': None})[0]
5,354,801
def outcar_parser(request):
    """A fixture that loads OUTCAR."""
    try:
        name = request.param
    except AttributeError:
        # Test not parametrized
        name = 'OUTCAR'
    testdir = os.path.dirname(__file__)
    outcarfile = testdir + '/' + name
    outcar = Outcar(file_path=outcarfile)
    return outcar
5,354,802
def accept_data(x: Any) -> Any:
    """Accept any types of data and return it as convenient type.

    Args:
        x: Any type of data.

    Returns:
        Any: Accepted data.
    """
    # Every branch (str, list, dict, tuple, set, float, int, bool, None and the
    # fallback) returns the value unchanged, so this is effectively an identity
    # function kept as an explicit type-dispatch point.
    if isinstance(x, (str, list, dict, tuple, set, float, int, bool, type(None))):
        return x
    else:
        return x
5,354,803
def custom_model_template(model_type: str, target: str, result0: str, result1: str) -> str:
    """Template for feature behaviour reason generated from DICE

    Returns:
        str: behaviour
    """
    if model_type == 'classifier':
        tipo = 'category'
    elif model_type == 'regressor':
        tipo = 'continuous'
    behaviour = get_behaviour(tipo=tipo, result0=result0, result1=result1)
    phrase = generic_type_template(tipo=tipo, name=target, behaviour=behaviour,
                                   result0=result0, result1=result1)
    result = color.BLUE + f" the output of the model {phrase}." + color.END
    return result
5,354,804
def merge_dict_list(merged, x):
    """Merge x into merged recursively. x is either a dict or a list."""
    if type(x) is list:
        return merged + x
    for key in x.keys():
        if key not in merged.keys():
            merged[key] = x[key]
        elif x[key] is not None:
            merged[key] = merge_dict_list(merged[key], x[key])
    return merged
5,354,805
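A minimal usage sketch for the merge_dict_list entry above (hypothetical inputs): nested dicts are merged recursively and lists are concatenated.

merged = {'a': 1, 'b': {'x': [1]}}
merged = merge_dict_list(merged, {'b': {'x': [2], 'y': 3}, 'c': None})
print(merged)  # {'a': 1, 'b': {'x': [1, 2], 'y': 3}, 'c': None}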
def test_certificate_index_page(rf):
    """test for certificate index page"""
    home_page = HomePageFactory()
    assert models.CertificateIndexPage.can_create_at(home_page)

    certificate_index_page = CertificateIndexPageFactory.create(parent=home_page)
    request = rf.get(certificate_index_page.get_url())

    bootcamp_run_page = BootcampRunPageFactory.create()
    certificate = BootcampRunCertificateFactory.create(
        bootcamp_run=bootcamp_run_page.bootcamp_run
    )
    certificate_page = CertificatePageFactory.create(parent=bootcamp_run_page)
    request = rf.get(certificate_page.get_url())

    assert (
        certificate_index_page.bootcamp_certificate(
            request, certificate.uuid
        ).status_code
        == 200
    )

    with pytest.raises(Http404):
        certificate_index_page.bootcamp_certificate(
            request, "00000000-0000-0000-0000-000000000000"
        )

    # Revoke the certificate and check index page returns 404
    certificate.revoke()
    with pytest.raises(Http404):
        certificate_index_page.bootcamp_certificate(request, certificate.uuid)

    with pytest.raises(Http404):
        certificate_index_page.index_route(request)
5,354,806
def test__heavy_atom_indices():
    """test graph.heavy_atom_indices"""
    mgrph = (('H', 'H', 'C', 'H', 'C', 'C', 'H', 'H', 'H'),
             frozenset([(frozenset([5, 6]), 1), (frozenset([4, 7]), 1),
                        (frozenset([8, 4]), 1), (frozenset([0, 5]), 1),
                        (frozenset([1, 5]), 1), (frozenset([2, 5]), 1),
                        (frozenset([2, 3]), 1), (frozenset([2, 4]), 2)]))
    assert graph.heavy_atom_indices(mgrph) == (2, 4, 5)
5,354,807
def is_ref(variant, exclude_alleles=None):
    """Returns true if variant is a reference record.

    Variant protos can encode sites that aren't actually mutations in the
    sample. For example, the record ref='A', alt='.' indicates that there is
    no mutation present (i.e., alt is the missing value).

    Args:
        variant: nucleus.genomics.v1.Variant.
        exclude_alleles: list(str). The alleles in this list will be ignored.

    Returns:
        True if there are no actual alternate alleles.
    """
    relevant_alts = _non_excluded_alts(variant.alternate_bases, exclude_alleles)
    return not relevant_alts
5,354,808
def gomc_sim_completed_properly(job, control_filename_str):
    """General check to see if the gomc simulation was completed properly."""
    job_run_properly_bool = False
    output_log_file = "out_{}.dat".format(control_filename_str)
    if job.isfile(output_log_file):
        # with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
        with open(f"{output_log_file}", "r") as fp:
            out_gomc = fp.readlines()
            for i, line in enumerate(out_gomc):
                if "Move" in line:
                    split_move_line = line.split()
                    if (
                        split_move_line[0] == "Move"
                        and split_move_line[1] == "Type"
                        and split_move_line[2] == "Mol."
                        and split_move_line[3] == "Kind"
                    ):
                        job_run_properly_bool = True
    else:
        job_run_properly_bool = False

    return job_run_properly_bool
5,354,809
def reset_tasks(name=None):
    """
    Resets tasks for constellation name. If name is None, tasks are reset for
    all running constellations.

    After reset, the current task is empty and any task that was
      - starting
      - running
      - stopping
    is set to stopped, and it can't be run again. Stopped tasks are not affected.
    """
    names = []
    if name:
        names = [name]
    else:
        names = get_constellation_names()
    for constellation_name in names:
        cs = ConstellationState(constellation_name)
        cs.set_value('current_task', '')
        tasks = cs.get_value('tasks')
        for task in tasks:
            task_id = task['task_id']
            state = task['task_state']
            if state not in ['ready']:
                cs.update_task_value(task_id, 'task_state', 'ready')
                cs.update_task_value(task_id, 'task_message', 'Ready to run')
5,354,810
def rv_precision(
    wavelength: Union[Quantity, ndarray],
    flux: Union[Quantity, ndarray],
    mask: Optional[ndarray] = None,
    **kwargs,
) -> Quantity:
    """Calculate the theoretical RV precision achievable on a spectrum.

    Parameters
    ----------
    wavelength: array-like or Quantity
        Wavelength of spectrum.
    flux: array-like or Quantity
        Flux of spectrum.
    mask: array-like, Quantity or None
        Masking function array to apply to the pixel weights.
    kwargs:
        Kwargs for sqrt_sum_wis.

    Returns
    -------
    RVrms: astropy.Quantity
        Radial velocity precision of spectra in m/s.
    """
    return c / sqrt_sum_wis(wavelength, flux, mask=mask, **kwargs)
5,354,811
def changenonetoNone(s):
    """Convert the string 'None' to the None object (NoneType)."""
    if s == 'None':
        return None
    else:
        return s
5,354,812
def quaternion_2_rotation_matrix(q):
    """
    Convert a quaternion to a rotation matrix.

    :param q: quaternion (scalar part first, i.e. q[0] is the real component)
    :return: 3x3 rotation matrix
    """
    rotation_matrix = np.array(
        [[np.square(q[0]) + np.square(q[1]) - np.square(q[2]) - np.square(q[3]),
          2 * (q[1] * q[2] - q[0] * q[3]),
          2 * (q[1] * q[3] + q[0] * q[2])],
         [2 * (q[1] * q[2] + q[0] * q[3]),
          np.square(q[0]) - np.square(q[1]) + np.square(q[2]) - np.square(q[3]),
          2 * (q[2] * q[3] - q[0] * q[1])],
         [2 * (q[1] * q[3] - q[0] * q[2]),
          2 * (q[2] * q[3] + q[0] * q[1]),
          np.square(q[0]) - np.square(q[1]) - np.square(q[2]) + np.square(q[3])]],
        dtype=np.float32)
    return rotation_matrix
5,354,813
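A quick sanity check for the quaternion_2_rotation_matrix entry above (assumes numpy is imported as np, as the function body requires): the identity quaternion (1, 0, 0, 0) should map to the 3x3 identity matrix.

import numpy as np

q = np.array([1.0, 0.0, 0.0, 0.0])  # scalar-first identity quaternion
R = quaternion_2_rotation_matrix(q)
assert np.allclose(R, np.eye(3))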
def sum(mat, axis, target=None):
    """
    Sum the matrix along the given dimension, where 0 represents the leading
    dimension and 1 represents the non-leading dimension. If a target is not
    provided, a new vector is created for storing the result.
    """
    m = _eigenmat.get_leading_dimension(mat.p_mat)
    n = _eigenmat.get_nonleading_dimension(mat.p_mat)

    if axis == 0:
        # sum along leading dimension
        if not target:
            target = empty((1, n))
    elif axis == 1:
        # sum along non-leading dimension
        if not target:
            target = empty((m, 1))

    err_code = _eigenmat.sum_by_axis(mat.p_mat, target.p_mat, ct.c_int(axis))
    if err_code:
        raise generate_exception(err_code)

    return target
5,354,814
def extract_wikidata_prop():
    """
    Obtain all the relevant triples of the movies from wikidata and output the
    percentage of coverage over all the movies in the dataset.

    :return: a csv file with all properties related to the movies from the
        latest small movielens dataset
    """
    # read movies link dataset and add the full imdbid column that matches the
    # wikidata format "ttXXXXXXX"
    all_movies = read_movie_info()
    links = read_links_info()
    s_all_movies = len(all_movies)
    links['full_imdbId'] = links['imdbId'].apply(lambda x: "tt" + str(format(x, '07d')))

    # create output, final dataframe with all properties of movies
    all_movie_props = pd.DataFrame(columns=['movieId', 'title', 'prop', 'obj'])

    # obtain properties of movies in batches (first batch of 350 rows, then 300 at a time)
    begin = 0
    end = 350
    total = len(links)

    # Obtain data from wikidata
    print("Start obtaining movie data")
    while end <= total:
        results = from_wikidata.get_movie_data_from_wikidata(links.iloc[begin:end])
        all_movie_props = all_movie_props.append(results)
        print("From " + str(begin) + " to " + str(end - 1) + " obtained from Wikidata")
        begin = end
        end = end + 300
        time.sleep(60)

    print("End obtaining movie data")

    # save output
    all_movie_props.to_csv(wikidata_props_ml_small, mode='w', header=True, index=False)
    print("Coverage: " + str(len(all_movie_props['movieId'].unique())) + " obtained of "
          + str(s_all_movies) + ". Percentage: "
          + str(len(all_movie_props['movieId'].unique()) / s_all_movies))
    print('Output file generated')
5,354,815
def convert_created_time_to_datetime(datestring):
    """
    Args:
        datestring (str): a string object either as a date or a unix timestamp

    Returns:
        a pandas datetime object
    """
    if len(datestring) == 30:
        return pd.to_datetime(datestring)
    else:
        return pd.to_datetime(datetime.fromtimestamp(int(datestring[:10])))
5,354,816
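A hypothetical usage sketch for the convert_created_time_to_datetime entry above (assumes pandas as pd and datetime are imported, as the function body requires): 30-character Twitter-style strings are parsed directly, anything else is treated as a unix timestamp whose first ten digits are the seconds.

print(convert_created_time_to_datetime('Wed Oct 10 20:19:24 +0000 2018'))  # parsed as a date string (30 chars)
print(convert_created_time_to_datetime('1539202764000'))                   # parsed as a unix timestamp in seconds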
def clear_fixture_quickcache(domain, data_types):
    """
    Clears quickcache for fixtures.dbaccessors

    Args:
        :domain: The domain that has been updated
        :data_types: List of FixtureDataType objects with stale cache
    """
    if not data_types:
        return

    type_ids = set()
    for data_type in data_types:
        type_ids.add(data_type.get_id)
        data_type.clear_caches()

    from corehq.apps.fixtures.dbaccessors import get_fixture_items_for_data_types
    get_fixture_items_for_data_types.clear(domain, type_ids)

    # We always call get_fixture_items_for_data_types with a list of all global
    # type ids when doing a restore (i.e. the cache key is a set of all global
    # type ids). So when updating just a subset of types, we still need to clear
    # the cache key that contains all types.
    global_type_ids = {dt.get_id for dt in FixtureDataType.by_domain(domain) if dt.is_global}
    get_fixture_items_for_data_types.clear(domain, global_type_ids)
5,354,817
def identify_word_classes(tokens, word_classes):
    """
    Match word classes to the token list

    :param list tokens: List of tokens
    :param dict word_classes: Dictionary of word lists to find and tag with the
        respective dictionary key
    :return: Set of matched word-class keys
    :rtype: set
    """
    if word_classes is None:
        word_classes = []
    classes = set()
    for key in word_classes:
        for token in tokens:
            if token.lower() in word_classes[key]:
                classes.add(key)
    return classes
5,354,818
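A small usage sketch for the identify_word_classes entry above (hypothetical inputs); the function returns a set of matched keys, so ordering is not guaranteed.

word_classes = {'animal': {'cat', 'dog'}, 'color': {'red', 'blue'}}
tokens = ['The', 'Dog', 'chased', 'a', 'red', 'ball']
print(identify_word_classes(tokens, word_classes))  # {'animal', 'color'} in some order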
def find_connecting_stops(routes) -> List[Tuple[Stop, List[Route]]]:
    """
    Find all stops that connect more than one route.

    Return [Stop, [Route]]
    """
    stops = {}
    for route in sorted(routes, key=Route.name):
        for stop in route.stops():
            id_ = stop.id()
            if id_ not in stops:
                stops[id_] = (stop, [])
            last(stops[id_]).append(route)
    return list(filter(lambda p: length(last(p)) > 1, stops.values()))
5,354,819
def freeze_loop(src, start, end, loopStart, loopEnd=None):
    """
    Freezes a range of frames from start to end using the frames comprised
    between loopStart and loopEnd. If no end frames are provided for the range
    or the loop, start frames will be used instead.
    """
    core = vs.get_core()

    if loopEnd is None:
        loopEnd = loopStart

    if start < 0 or start > src.num_frames - 1:
        raise ValueError('start frame out of bounds: {}.'.format(start))
    if loopStart < 0 or loopStart > src.num_frames - 1:
        raise ValueError('loop start frame out of bounds: {}.'.format(loopStart))
    if end < start or end > src.num_frames - 1:
        raise ValueError('end frame out of bounds: {}.'.format(end))
    if loopEnd < loopStart or loopEnd > src.num_frames - 1:
        raise ValueError('loop end out of bounds: {}.'.format(loopEnd))

    loop = core.std.Loop(src[loopStart:loopEnd + 1], 0)
    span = end - start + 1

    if start != 0:
        final = src[:start] + loop[:span]
    else:
        final = loop[:span]
    if end < src.num_frames - 1:
        final = final + src[end + 1:]

    if src.num_frames != final.num_frames:
        raise ValueError(
            'input / output framecount mismatch (got: {}; expected: {}).'.format(
                final.num_frames, src.num_frames))

    return final
5,354,820
def timevalue(cflo, prate, base_date=0, utility=None): """ Computes the equivalent net value of a generic cashflow at time `base_date` using the periodic interest rate `prate`. If `base_date` is 0, `timevalue` computes the net present value of the cashflow. If `base_date` is the index of the last element of `cflo`, this function computes the equivalent future value. Args: cflo (pandas.Series, list of pandas.Series): Generic cashflow. prate (pandas.Series): Periodic interest rate. base_date (int, tuple): Time. utility (function): Utility function. Returns: Float or list of floats. **Examples.** >>> cflo = cashflow([-732.54] + [100]*8, start='2000Q1', freq='Q') >>> prate = interest_rate([2]*9, start='2000Q1', freq='Q') >>> timevalue(cflo, prate) # doctest: +ELLIPSIS 0.00... >>> prate = interest_rate([12]*5, start='2000Q1', freq='Q') >>> cflo = cashflow([-200]+[100]*4, start='2000Q1', freq='Q') >>> timevalue(cflo, prate) # doctest: +ELLIPSIS 103.73... >>> timevalue(cflo, prate, 4) # doctest: +ELLIPSIS 163.22... >>> prate = interest_rate([12]*5, start='2000Q1', freq='Q') >>> cflo = cashflow([-200] + [100]*4, start='2000Q1', freq='Q') >>> timevalue(cflo=cflo, prate=prate) # doctest: +ELLIPSIS 103.73... >>> timevalue(cflo=[cflo, cflo], prate=prate) # doctest: +ELLIPSIS 0 103.734935 1 103.734935 dtype: float64 """ if isinstance(cflo, pd.Series): cflo = [cflo] if not isinstance(prate, pd.Series): raise TypeError("`prate` must be a pandas.Series") verify_period_range(cflo + [prate]) retval = pd.Series([0] * len(cflo), dtype=np.float64) factor = to_discount_factor(prate=prate, base_date=base_date) for index, xcflo in enumerate(cflo): netval = 0 for time, _ in enumerate(xcflo): netval += xcflo[time] * factor[time] retval[index] = netval if len(retval) == 1: return retval[0] return retval
5,354,821
def munge(examples, multiplier, prob, loc_var, data_t, seed=0): """ Generates a dataset from the original one :param examples: Training examples :type examples: 2d numpy array :param multiplier: size multiplier :type multiplier: int k :param prob: probability of swapping values :type prob: flt (0 to 1) :param loc_var: local variance parameter :type loc_var: flt :param data_t: Identifies whether or not the attribute is continuous or nominal :type data_t: Numpy array of strs """ np.random.seed(seed) new_dataset = None continuous = [True if x == FeatureType.CONTINUOUS else False for x in data_t] nominal = np.logical_not(continuous) data_c = examples[:, continuous].astype(float) # Scales data linearly from 0 to 1 norm_data_c = normalize(data_c - np.min(data_c, axis=0), axis=0, norm='max') data_n = examples[:, nominal] indicies = nn(norm_data_c, data_n) for i in range(multiplier): T_prime = np.copy(examples) # Runs through all the examples in the dataset for j in range(examples.shape[0]): index = indicies[j, 1] if indicies[j, 0] == j else indicies[j, 0] pt1 = T_prime[j, :] pt2 = T_prime[index, :] # Runs through all features for an example and its nn for k in range(len(data_t)): # Swaps the two fields with probability prob if np.random.ranf() < prob: if data_t[k] == FeatureType.CONTINUOUS: std = abs(float(pt1[k]) - float(pt2[k])) / loc_var temp = float(pt1[k]) pt1[k] = np.random.normal(float(pt2[k]), std) pt2[k] = np.random.normal(temp, std) else: temp = pt1[k] pt1[k] = pt2[k] pt2[k] = temp # Combines the dataset to the final one if new_dataset is None: new_dataset = np.copy(T_prime) else: new_dataset = np.vstack((new_dataset, T_prime)) return new_dataset
5,354,822
def is_interested_source_code_file(afile):
    """
    Check whether a file is a source code file that we are interested in.
    """
    tokens = afile.split(".")
    if len(tokens) > 1 and tokens[-1] in ("c", "cpp", "pl", "tmpl", "py", "s", "S"):
        # we care about C/C++/perl/template/python/assembly source code files
        return True
    return False
5,354,823
def write_mirror_mesh_dict(case, normalLine):
    """defines the axes for mirroring the quarter cylinder"""
    mirror_mesh_dict = {
        'planeType': 'pointAndNormal',
        'pointAndNormalDict': {'basePoint': [0, 0, 0], 'normalVector': normalLine},
        'planeTolerance': 1e-06,
    }
    with case.mutable_data_file(FileName.MIRROR_MESH) as d:
        d.update(mirror_mesh_dict)
5,354,824
def handle_survey_answers():
    """Receives form data and adds the submission to the database.

    Args:
        data (str): From the POST request arguments. Should be in JSON form
            and have all the quiz response information.

    Raises:
        AssertionError: When the quiz type is not valid.

    Returns:
        str: The ID of the quiz entry in the database.
    """
    # Load the JSON as dictionary.
    entry = json.loads(request.form['data'])

    # Add the current timestamp to the data.
    timestamp = time.time()
    entry['timestamp_secs'] = timestamp

    entry_string = json.dumps(entry, indent=4, sort_keys=True)
    logging.debug(entry_string)

    db = get_db()
    # Use the form type to access different collections.
    form_type = entry['form_type']
    responses_col = None
    # Differentiate between men and women quiz.
    if form_type == 'men':
        responses_col = db.responses_men
    elif form_type == 'women':
        responses_col = db.responses_women
    else:
        logging.warning("Form Type is not 'men' or 'women': {}".format(form_type))
        raise AssertionError("Form Type is not 'men' or 'women': {}".format(form_type))

    # Update responses counter.
    responses_col.find_one_and_update(
        {'_id': 'responses'}, {'$inc': {'count': 1}}, upsert=True)

    # Insert the response information.
    response_id = responses_col.insert_one(entry).inserted_id
    resp = {"id": str(response_id)}
    return jsonify(resp)
5,354,825
def recursively_extract(node, exfun, maxdepth=2): """ Transform a html ul/ol tree into a python list tree. Converts a html node containing ordered and unordered lists and list items into an object of lists with tree-like structure. Leaves are retrieved by applying `exfun` function to the html nodes not containing any ul/ol list. Args: node: BeautifulSoup HTML node to traverse exfun: function to apply to every string node found maxdepth: maximal depth of lists to go in the node Returns: A tree-like python object composed of lists. Examples: >>> node_content = \ ''' <ol> <li>Hase</li> <li>Nase<ol><li>Eins</li><li>Zwei</li></ol></li> </ol>''' >>> node = BeautifulSoup(node_content, "lxml") >>> recursively_extract(node, lambda x: x) [<li>Hase</li>, [<li>Eins</li>, <li>Zwei</li>]] >>> recursively_extract(node, lambda x: x.get_text()) ['Hase', ['Eins', 'Zwei']] """ if node.name in ['ol', 'ul']: lilist = node else: lilist = node.ol or node.ul if lilist and maxdepth: # apply 'recursively_extract' to every 'li' node found under this node return [recursively_extract(li, exfun, maxdepth=(maxdepth - 1)) for li in lilist.find_all('li', recursive=False)] # if this node doesn't contain 'ol' or 'ul' node, return the transformed # leaf (using the 'exfun' function) return exfun(node)
5,354,826
def run_cmd(command):
    """
    Command execution.

    Run the given command as a subprocess and raise an error if it fails.
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() both waits for the process and drains stdout/stderr in one
    # call, avoiding the pipe-buffer deadlock that wait() can cause and the
    # double communicate() of the original version.
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise Exception(
            "The tokenizing could not be completed (returncode=%i): %s %s"
            % (proc.returncode, out, err)
        )
5,354,827
def group_v2_deconv_decoder(latent_tensor, output_shape, hy_ncut=1, group_feats_size=gin.REQUIRED, lie_alg_init_scale=gin.REQUIRED, lie_alg_init_type=gin.REQUIRED, n_act_points=gin.REQUIRED, is_training=True): """Convolutional decoder used in beta-VAE paper for the chairs data. Based on row 3 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework" (https://openreview.net/forum?id=Sy2fzU9gl) Here we add an extra linear mapping for group features extraction. Args: latent_tensor: Input tensor of shape (batch_size,) to connect decoder to. output_shape: Shape of the data. group_feats_size: The dimension of group features. is_training: Whether or not the graph is built for training (UNUSED). Returns: Output tensor of shape (batch_size, 64, 64, num_channels) with the [0,1] pixel intensities. group_feats: Group features. """ # del is_training lie_alg_basis_ls = [] latent_dim = latent_tensor.get_shape().as_list()[-1] latents_in_cut_ls = split_latents(latent_tensor, hy_ncut=hy_ncut) # [x0, x1] mat_dim = int(math.sqrt(group_feats_size)) for i in range(latent_dim): init = tf.initializers.random_normal(0, lie_alg_init_scale) lie_alg_tmp = tf.get_variable('lie_alg_' + str(i), shape=[1, mat_dim, mat_dim], initializer=init) if lie_alg_init_type == 'oth': lie_alg_tmp = tf.matrix_band_part(lie_alg_tmp, 0, -1) lie_alg_tmp = lie_alg_tmp - tf.transpose(lie_alg_tmp, perm=[0, 2, 1]) lie_alg_basis_ls.append(lie_alg_tmp) lie_alg_basis = tf.concat(lie_alg_basis_ls, axis=0)[tf.newaxis, ...] # [1, lat_dim, mat_dim, mat_dim] lie_alg = 0 lie_group = tf.eye(mat_dim, dtype=lie_alg_basis_ls[0].dtype)[tf.newaxis, ...] for i, lie_alg_basis_i in enumerate(lie_alg_basis_ls): lie_alg_tmp = lie_alg_basis_i * latent_tensor[:, i][..., tf.newaxis, tf.newaxis] lie_alg = lie_alg + lie_alg_tmp lie_group_tmp = tf.linalg.expm( lie_alg_tmp) # [b, mat_dim, mat_dim] lie_group = tf.matmul(lie_group_tmp, lie_group) # if not is_training: # lie_alg_mul = latent_tensor[ # ..., tf.newaxis, tf. # newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim] # lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim] # lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim] # else: # lie_group = tf.eye( # mat_dim, # dtype=latents_in_cut_ls[0].dtype)[tf.newaxis, ...] # lie_alg = 0 # for latents_in_cut_i in latents_in_cut_ls: # lie_alg_mul_tmp = latents_in_cut_i[ # ..., tf.newaxis, tf.newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim] # lie_alg_tmp = tf.reduce_sum( # lie_alg_mul_tmp, # axis=1) # [b, mat_dim, mat_dim] # lie_alg = lie_alg + lie_alg_tmp # lie_group_tmp = tf.linalg.expm( # lie_alg_tmp) # [b, mat_dim, mat_dim] # lie_group = tf.matmul(lie_group, # lie_group_tmp) transed_act_points_tensor = tf.reshape(lie_group, [-1, mat_dim * mat_dim]) # lie_alg_mul = latent_tensor[ # ..., tf.newaxis, tf. 
# newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim] # lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim] # lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim] # act_init = tf.initializers.random_normal(0, 0.01) # act_points = tf.get_variable('act_points', # shape=[1, mat_dim, n_act_points], # initializer=act_init) # transed_act_points = tf.matmul(lie_group, act_points) # transed_act_points_tensor = tf.reshape(transed_act_points, # [-1, mat_dim * n_act_points]) d1 = tf.layers.dense(transed_act_points_tensor, 256, activation=tf.nn.relu) d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu) d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64]) d3 = tf.layers.conv2d_transpose( inputs=d2_reshaped, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu, padding="same", ) d4 = tf.layers.conv2d_transpose( inputs=d3, filters=32, kernel_size=4, strides=2, activation=tf.nn.relu, padding="same", ) d5 = tf.layers.conv2d_transpose( inputs=d4, filters=32, kernel_size=4, strides=2, activation=tf.nn.relu, padding="same", ) d6 = tf.layers.conv2d_transpose( inputs=d5, filters=output_shape[2], kernel_size=4, strides=2, padding="same", ) return tf.reshape(d6, [-1] + output_shape), lie_group, lie_alg_basis
5,354,828
def cptfile2dict(filepath): """ Extracts a color dictionary and list for a colormap object from a .cpt file Parameters ---------- filepath: str filepath of a .cpt file including file extension Returns ------- colormap name, list containing all colors, dictionary containing all colors """ if not os.path.exists(filepath): raise ImportError("file ", filepath, "not found") file = open(filepath) name = os.path.splitext(os.path.basename(filepath))[0] lines = file.readlines() file.close() x = [] r = [] g = [] b = [] color_model = "RGB" for l in lines: ls = l.split() if l.strip(): if l[0] == "#": if ls[-1] == "HSV": color_model = "HSV" continue if ls[0] == "B" or ls[0] == "F" or ls[0] == "N": pass else: x.append(float(ls[0])) r.append(float(ls[1])) g.append(float(ls[2])) b.append(float(ls[3])) xtemp = float(ls[4]) rtemp = float(ls[5]) gtemp = float(ls[6]) btemp = float(ls[7]) else: continue x.append(xtemp) r.append(rtemp) g.append(gtemp) b.append(btemp) x = np.array(x, dtype=np.float64) r = np.array(r, dtype=np.float64) g = np.array(g, dtype=np.float64) b = np.array(b, dtype=np.float64) if color_model == "HSV": for i in range(r.shape[0]): rr, gg, bb = colorsys.hsv_to_rgb(r[i] / 360., g[i], b[i]) r[i] = rr g[i] = gg b[i] = bb if color_model == "RGB": r = r/255 g = g/255 b = b/255 x_norm = (x - x[0])/(x[-1] - x[0]) col_list = [(r[i], g[i], b[i]) for i in range(len(r))] red = [] green = [] blue = [] for i in range(len(x)): red.append([x_norm[i], r[i], r[i]]) green.append([x_norm[i], g[i], g[i]]) blue.append([x_norm[i], b[i], b[i]]) color_dict = {"red": red, "green": green, "blue": blue} return name, col_list, color_dict
5,354,829
def test_tensor_array_of_numpy_arrays_four():
    """Performing tensor product on four numpy array of numpy arrays."""
    input_arr = np.array([np.identity(2), np.identity(2), np.identity(2), np.identity(2)])
    res = tensor(input_arr)

    expected_res = np.identity(16)

    bool_mat = np.isclose(res, expected_res)
    np.testing.assert_equal(np.all(bool_mat), True)
5,354,830
def UncertaintyLossNet():
    """Creates Uncertainty weighted loss model

    https://arxiv.org/abs/1705.07115
    """
    l1 = layers.Input(shape=())
    l2 = layers.Input(shape=())
    loss = UncertaintyWeightedLoss()([l1, l2])
    model = Model(inputs=[l1, l2], outputs=loss)
    return model
5,354,831
def inject_signals( frame_files: Iterable[str], channels: [str], ifos: [str], prior_file: str, n_samples: int, outdir: str, fmin: float = 20, waveform_duration: float = 8, snr_range: Iterable[float] = [25, 50], ): """Injects simulated BBH signals into a frame, or set of corresponding frames from different interferometers. Frames should have the same start/stop time and the same sample rate Args: frame_files: list of paths to frames to be injected channels: channel names of the strain data in each frame ifos: list of interferometers corresponding to frames, e.g., H1, L1 prior_file: prior file for bilby to sample from n_samples: number of signal to inject outdir: output directory to which injected frames will be written fmin: Minimum frequency for highpass filter waveform_duration: length of injected waveforms snr_range: desired signal SNR range Returns: Paths to the injected frames and the parameter file """ strains = [ TimeSeries.read(frame, ch) for frame, ch in zip(frame_files, channels) ] logging.info("Read strain from frame files") span = set([strain.span for strain in strains]) if len(span) != 1: raise ValueError( "Frame files {} and {} have different durations".format( *frame_files ) ) frame_start, frame_stop = next(iter(span)) frame_duration = frame_stop - frame_start sample_rate = set([int(strain.sample_rate.value) for strain in strains]) if len(sample_rate) != 1: raise ValueError( "Frame files {} and {} have different sample rates".format( *frame_files ) ) sample_rate = next(iter(sample_rate)) fftlength = int(max(2, np.ceil(2048 / sample_rate))) # set the non-overlapping times of the signals in the frames randomly # leaves buffer at either end of the series so edge effects aren't an issue signal_times = sorted( np.random.choice( np.arange( waveform_duration, frame_duration - waveform_duration, waveform_duration, ), size=n_samples, replace=False, ) ) # log and print out some simulation parameters logging.info("Simulation parameters") logging.info("Number of samples : {}".format(n_samples)) logging.info("Sample rate [Hz] : {}".format(sample_rate)) logging.info("High pass filter [Hz] : {}".format(fmin)) logging.info("Prior file : {}".format(prior_file)) # define a Bilby waveform generator waveform_generator = bilby.gw.WaveformGenerator( duration=waveform_duration, sampling_frequency=sample_rate, frequency_domain_source_model=lal_binary_black_hole, parameter_conversion=convert_to_lal_binary_black_hole_parameters, waveform_arguments={ "waveform_approximant": "IMRPhenomPv2", "reference_frequency": 50, "minimum_frequency": 20, }, ) # sample GW parameters from prior distribution priors = bilby.gw.prior.BBHPriorDict(prior_file) sample_params = priors.sample(n_samples) sample_params["geocent_time"] = signal_times signals_list = [] snr_list = [] for strain, channel, ifo in zip(strains, channels, ifos): # calculate the PSD strain_psd = strain.psd(fftlength) # generate GW waveforms raw_signals = generate_gw( sample_params, waveform_generator=waveform_generator, ) signals, snr = project_raw_gw( raw_signals, sample_params, waveform_generator, ifo, get_snr=True, noise_psd=strain_psd, ) signals_list.append(signals) snr_list.append(snr) old_snr = np.sqrt(np.sum(np.square(snr_list), axis=0)) new_snr = np.random.uniform(snr_range[0], snr_range[1], len(snr_list[0])) signals_list = [ signals * (new_snr / old_snr)[:, None] for signals in signals_list ] sample_params["luminosity_distance"] = ( sample_params["luminosity_distance"] * old_snr / new_snr ) snr_list = [snr * new_snr / old_snr for snr in 
snr_list] outdir = Path(outdir) frame_out_paths = [outdir / f.name for f in map(Path, frame_files)] for strain, signals, frame_path in zip( strains, signals_list, frame_out_paths ): for i in range(n_samples): idx1 = int( (signal_times[i] - waveform_duration / 2.0) * sample_rate ) idx2 = idx1 + waveform_duration * sample_rate strain[idx1:idx2] += signals[i] strain.write(frame_path) # Write params and similar to output file param_file = outdir / f"param_file_{frame_start}-{frame_stop}.h5" with h5py.File(param_file, "w") as f: # write signals attributes, snr, and signal parameters params_gr = f.create_group("signal_params") for k, v in sample_params.items(): params_gr.create_dataset(k, data=v) # Save signal times as actual GPS times f.create_dataset("GPS-start", data=signal_times + frame_start) for i, ifo in enumerate(ifos): ifo_gr = f.create_group(ifo) ifo_gr.create_dataset("signal", data=signals_list[i]) ifo_gr.create_dataset("snr", data=snr_list[i]) # write frame attributes f.attrs.update( { "size": n_samples, "frame_start": frame_start, "frame_stop": frame_stop, "sample_rate": sample_rate, "psd_fftlength": fftlength, } ) # Update signal attributes f.attrs["waveform_duration"] = waveform_duration f.attrs["flag"] = "GW" return frame_out_paths, param_file
5,354,832
async def lyric(id: int, endpoint: NeteaseEndpoint = Depends(requestClient)):
    """
    ## Name: `lyric`

    > Lyrics

    ---

    ### Required:
    - ***int*** **`id`**
        - Description: song (single track) ID
    """
    return await endpoint.lyric(id=id)
5,354,833
def test_ae_jaguar(): """ Test autoencoder forecasting with the Jaguar dataset """ # Sample data df = jaguar() # Hyperparameters batch_size = 10 num_past = 10 num_future = 5 # Prepare the dataloader data_loaders = dataset.MultiModalDataLoader( df, batch_size=batch_size, n_past=num_past, n_future=num_future, num_workers=1, train_split_ratio=0.5, validation_split_ratio=0.2, ) model_save_path = "./model.pt" model = MultiModelAE( input_size=2, num_past=num_past, batch_size=batch_size, num_future=num_future, lstm_hidden_size=32, num_lstm_layers=2, output_size=2, latent_size=10, batch_first=True, dropout=0.1, reset_state=True, bidirectional=False, ) # Model Trainer # Model types; "ae" or "vae" trainer = HybridTrainer(model=model, optimizer_type="Adam", loss_type="huber") # Train the model trainer.fit(data_loaders, model_save_path, epochs=5, training_mode="forecasting", validate_every=2, test_every=5) trainer.fit(data_loaders, model_save_path, epochs=5, training_mode="forecasting", validate_every=None, test_every=5) trainer.fit(data_loaders, model_save_path, epochs=5, training_mode="forecasting", validate_every=2, test_every=None) trainer.validate(data_loaders["sequential_validation_loader"])
5,354,834
def get_body(m):
    """extract the plain text body. return the body"""
    if m.is_multipart():
        body = m.get_body(preferencelist=('plain',)).get_payload(decode=True)
    else:
        body = m.get_payload(decode=True)
    if isinstance(body, bytes):
        return body.decode()
    else:
        return body
5,354,835
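A minimal usage sketch for the get_body entry above, using the standard-library email package with the default policy so that get_body() is available on multipart messages:

from email import message_from_string, policy

raw = 'From: a@example.com\nSubject: hi\n\nHello there\n'
msg = message_from_string(raw, policy=policy.default)
print(get_body(msg))  # 'Hello there\n'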
def get_image(img_path, ch=3, scale=None, tile_size=None, interpolate=cv2.INTER_AREA): """ Loads image data into standard Numpy array Reads image and reverses channel order. Loads image as 8 bit (regardless of original depth) Parameters ------ img_path: str Image file path. ch: int Number of input channels (default = 3). scale: float Scaling factor. tile_size: int Tile dimension (square). interpolate: int Interpolation method (OpenCV). Returns ------ numpy array Image array; formats: grayscale: [HW]; colour: [HWC]. w: int Image width (px). h: int Image height (px). w_resized: int Image width resized (px). h_resized: int Image height resized (px). """ assert ch == 3 or ch == 1, 'Invalid number of input channels:\t{}.'.format(ch) assert os.path.exists(img_path), 'Image path {} does not exist.'.format(img_path) if not tile_size: tile_size = defaults.tile_size # verify image channel number img = cv2.imread(img_path, cv2.IMREAD_COLOR) if is_grayscale(img) and ch == 3: print('\nInput image is grayscale but process expects colour (RGB).\n\tApplication stopped.') exit(1) elif not is_grayscale(img) and ch == 1: if input("\nInput image is in colour (RGB) but process expects grayscale. " "Apply grayscale filter? (Enter \'Y\' or \'y\' for Yes): ") in ['Y', 'y']: grayscale(img) # load image data if ch == 3: img = cv2.imread(img_path, cv2.IMREAD_COLOR) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) else: img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # get dimensions height, width = img.shape[:2] height_resized = height width_resized = width # apply scaling if scale: min_dim = min(height, width) # adjust scale to minimum size (tile dimensions) if min_dim < tile_size: scale = tile_size / min_dim dim = (int(scale * width), int(scale * height)) img = cv2.resize(img, dim, interpolation=interpolate) height_resized, width_resized = img.shape[:2] return img, width, height, width_resized, height_resized
5,354,836
def density(mass, volume):
    """
    Calculate density.
    """
    return mass / volume
5,354,837
def is_pascal_case(key: str) -> None:
    """
    Asserts that a value is PascalCased.

    :param key: str
    :return: None
    :raises: django_swagger_tester.exceptions.CaseError
    """
    logger.debug('Verifying that `%s` is properly pascal cased', key)
    if len(key) == 0:
        return
    if len(key) == 1 and (key.isalpha() is False or (key.isalpha() is True and key != key.upper())):
        logger.error('%s is not pascal cased', key)
        raise CaseError(f'The property `{key}` is not properly PascalCased')
    pascal_cased_key = key[0].upper() + re.sub(
        r'[\-_.\s]([a-z])', lambda matched: matched.group(1).upper(), key[1:])
    if pascal_cased_key != key:
        logger.error('%s is not pascal cased', key)
        raise CaseError(f'The property `{key}` is not properly PascalCased')
5,354,838
def test__anharmonic_zpve():
    """test the anharmonic ZPVE read/write functions"""
    ref_anh_zpve = -25.123455

    anh_zpve_file_name = autofile.data_types.name.anharmonic_zpve('test')
    anh_zpve_file_path = os.path.join(TMP_DIR, anh_zpve_file_name)
    anh_zpve_str = autofile.data_types.swrite.anharmonic_zpve(ref_anh_zpve)

    assert not os.path.isfile(anh_zpve_file_path)
    autofile.io_.write_file(anh_zpve_file_path, anh_zpve_str)
    assert os.path.isfile(anh_zpve_file_path)

    anh_zpve_str = autofile.io_.read_file(anh_zpve_file_path)
    anh_zpve = autofile.data_types.sread.anharmonic_zpve(anh_zpve_str)
    assert numpy.isclose(ref_anh_zpve, anh_zpve)
5,354,839
def _is_global(obj, name=None): """Determine if obj can be pickled as attribute of a file-backed module""" if name is None: name = getattr(obj, '__qualname__', None) if name is None: name = getattr(obj, '__name__', None) module_name = _whichmodule(obj, name) if module_name is None: # In this case, obj.__module__ is None AND obj was not found in any # imported module. obj is thus treated as dynamic. return False if module_name == "__main__": return False module = sys.modules.get(module_name, None) if module is None: # The main reason why obj's module would not be imported is that this # module has been dynamically created, using for example # types.ModuleType. The other possibility is that module was removed # from sys.modules after obj was created/imported. But this case is not # supported, as the standard pickle does not support it either. return False # module has been added to sys.modules, but it can still be dynamic. if _is_dynamic(module): return False try: obj2, parent = _getattribute(module, name) except AttributeError: # obj was not found inside the module it points to return False return obj2 is obj
5,354,840
def download(args):
    """
    Download artefacts matching the regular expressions.
    """
    storage = s3.S3Storage(args.storage_endpoint)
    matching_objects = storage.search(args.expression)
    if not matching_objects:
        LOGGER.warning("Did not find any matching folders/artefacts")
        return
    LOGGER.info("{} artefacts to download:\n{}".format(
        len(matching_objects), "\n".join(matching_objects)))
    if _user_confirm():
        for name in matching_objects:
            storage.download(name)
            LOGGER.info("{} downloaded in current directory".format(name))
    else:
        LOGGER.info("Aborted")
5,354,841
def get_augmenter(augmenter_type: str, image_size: ImageSizeType, dataset_mean: DatasetStatType, dataset_std: DatasetStatType, padding: PaddingInputType = 1. / 8., pad_if_needed: bool = False, subset_size: int = 2) -> Union[Module, Callable]: """ Args: augmenter_type: augmenter type image_size: (height, width) image size dataset_mean: dataset mean value in CHW dataset_std: dataset standard deviation in CHW padding: percent of image size to pad on each border of the image. If a sequence of length 4 is provided, it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is used to pad left/right, top/bottom borders, respectively. pad_if_needed: bool flag for RandomCrop "pad_if_needed" option subset_size: number of augmentations used in subset Returns: nn.Module for Kornia augmentation or Callable for torchvision transform """ if not isinstance(padding, tuple): assert isinstance(padding, float) padding = (padding, padding, padding, padding) assert len(padding) == 2 or len(padding) == 4 if len(padding) == 2: # padding of length 2 is used to pad left/right, top/bottom borders, respectively # padding of length 4 is used to pad left, top, right, bottom borders respectively padding = (padding[0], padding[1], padding[0], padding[1]) # image_size is of shape (h,w); padding values is [left, top, right, bottom] borders padding = ( int(image_size[1] * padding[0]), int(image_size[0] * padding[1]), int(image_size[1] * padding[2]), int(image_size[0] * padding[3]) ) augmenter_type = augmenter_type.strip().lower() if augmenter_type == "simple": return nn.Sequential( K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed, padding_mode='reflect'), K.RandomHorizontalFlip(p=0.5), K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32), std=torch.tensor(dataset_std, dtype=torch.float32)), ) elif augmenter_type == "fixed": return nn.Sequential( K.RandomHorizontalFlip(p=0.5), # K.RandomVerticalFlip(p=0.2), K.RandomResizedCrop(size=image_size, scale=(0.8, 1.0), ratio=(1., 1.)), RandomAugmentation( p=0.5, augmentation=F.GaussianBlur2d( kernel_size=(3, 3), sigma=(1.5, 1.5), border_type='constant' ) ), K.ColorJitter(contrast=(0.75, 1.5)), # additive Gaussian noise K.RandomErasing(p=0.1), # Multiply K.RandomAffine( degrees=(-25., 25.), translate=(0.2, 0.2), scale=(0.8, 1.2), shear=(-8., 8.) ), K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32), std=torch.tensor(dataset_std, dtype=torch.float32)), ) elif augmenter_type in ["validation", "test"]: return nn.Sequential( K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32), std=torch.tensor(dataset_std, dtype=torch.float32)), ) elif augmenter_type == "randaugment": return nn.Sequential( K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed, padding_mode='reflect'), K.RandomHorizontalFlip(p=0.5), RandAugmentNS(n=subset_size, m=10), K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32), std=torch.tensor(dataset_std, dtype=torch.float32)), ) else: raise NotImplementedError(f"\"{augmenter_type}\" is not a supported augmenter type")
5,354,842
def query(limit=None, username=None, ids=None, user=None): """# Retrieve Workspaces Receive a generator of Workspace objects previously created in the Stark Bank API. If no filters are passed and the user is an Organization, all of the Organization Workspaces will be retrieved. ## Parameters (optional): - limit [integer, default None]: maximum number of objects to be retrieved. Unlimited if None. ex: 35 - username [string, default None]: query by the simplified name that defines the workspace URL. This name is always unique across all Stark Bank Workspaces. Ex: "starkbankworkspace" - ids [list of strings, default None]: list of ids to filter retrieved objects. ex: ["5656565656565656", "4545454545454545"] - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call ## Return: - generator of Workspace objects with updated attributes """ return rest.get_stream(resource=_resource, limit=limit, username=username, ids=ids, user=user)
5,354,843
def _add_output_tensor_nodes(net, preprocess_tensors, output_collection_name='inferece_op'):
    """
    Adds output nodes for all preprocess_tensors.

    :param net: network object exposing all_rois_scores and all_rois.
    :param preprocess_tensors: a dictionary containing all the predictions;
    :param output_collection_name: Name of collection to add output tensors to.
    :return: A tensor dict containing the added output tensor nodes.
    """
    outputs = {}
    outputs['roi_scores'] = tf.identity(net.all_rois_scores, name='rois_scores')
    outputs['rois'] = tf.identity(net.all_rois, name='rois')
    for output_key in outputs.keys():
        tf.add_to_collection(output_collection_name, outputs[output_key])
    return outputs
5,354,844
def bf_print_answer(answer_json):
    # type: (Dict) -> None
    """Print the given answer JSON to console."""
    print(bf_str_answer(answer_json))
5,354,845
def _configure_logger( fmt, quiet, level, fpath, processors, metric_grouping_interval, minimal ): """ configures a logger when required write to stderr or a file """ # NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently. global _GLOBAL_LOG_CONFIGURED if _GLOBAL_LOG_CONFIGURED: return assert fmt in ["json", "pretty"] _processors = define_log_processors() _processors += processors or [] if metric_grouping_interval: _processors.append(metrics_grouping_processor) if minimal: _processors.append(_structlog_minimal_processor) streams = [] if fpath: f = FileWrapper(fpath) streams.append(f) if fmt == "json" and not quiet: streams.append(sys.stderr) if fmt == "pretty" and not quiet: _processors.append(StderrConsoleRenderer()) _processors.append(structlog.processors.JSONRenderer()) # a global level struct log config unless otherwise specified. level = getattr(logging, level.upper()) stream = streams[0] if len(streams) == 1 else Stream(*streams) atexit.register(stream.close) structlog.configure( processors=_processors, context_class=dict, logger_factory=LevelLoggerFactory(stream, level=level), wrapper_class=BoundLevelLogger, cache_logger_on_first_use=True, ) # TODO take care of removing other handlers stdlib_root_log = logging.getLogger() stdlib_root_log.addHandler(StdlibStructlogHandler()) stdlib_root_log.setLevel(level) _GLOBAL_LOG_CONFIGURED = True
5,354,846
def start_pane(pane, callback, program_info=''): """Open the user interface with the given initial pane.""" frame = Window(footer=program_info + ' | q: quit, ?: help') frame.open(pane, callback) palette = _add_calendar_colors(getattr(colors, pane.conf['view']['theme']), pane.collection) loop = urwid.MainLoop(frame, palette, unhandled_input=frame.on_key_press, pop_ups=True) # Make urwid use 256 color mode. loop.screen.set_terminal_properties( colors=256, bright_is_bold=pane.conf['view']['bold_for_light_color']) def ctrl_c(signum, f): raise urwid.ExitMainLoop() signal.signal(signal.SIGINT, ctrl_c) try: loop.run() except Exception: import traceback tb = traceback.format_exc() try: # Try to leave terminal in usable state loop.stop() except Exception: pass print(tb) sys.exit(1)
5,354,847
def gen_add_requests(trace_folder, number_names=10000, first_name=0,
                     append_to_file=False, lns_ids=None, name_prefix=None):
    """Generates 'add' requests for 'number_names' from a set of local name servers.

    Workload generation parameters:
        number_names = 10000  # number of names in the workload.
        first_name = 0        # workload will have names in range (first_name, first_name + number_names)
        num_lns = -1          # set this to -1 to generate a trace for all LNS in the lns geo file;
                              # otherwise a trace is generated for the first 'num_lns' in the lns geo file
        append = True         # append new requests to the end of trace files
        lns_ids = None        # list of IDs of local name servers in the order of their names in the LNS geo
                              # file; if lns_ids is not None, the trace file for that LNS is named after its ID
        name_prefix = None    # if name_prefix is not None, prepend the given prefix to all names
    """
    names = []
    for i in range(number_names):
        if name_prefix is None:
            name = str(i + first_name)
        else:
            name = name_prefix + str(i)
        names.append(name)

    gen_add_requests_names(trace_folder, names, lns_ids, append_to_file)
5,354,848
def list_file_details(dxm_state, rulesetname, envname, metaname):
    """
    Display file details.

    The output list will be limited by the value of the --rulesetname,
    --envname or --metaname options if set, and a non-zero return code is
    returned if metaname is not found.
    """
    exit(tab_listfile_details(
        dxm_state.engine,
        dxm_state.engineuser,
        dxm_state.format,
        rulesetname,
        envname,
        metaname))
5,354,849
def _set_constance_value(key, value):
    """
    Parses and sets a Constance value from a string

    :param key:
    :param value:
    :return:
    """
    form = ConstanceForm(initial=get_values())

    field = form.fields[key]

    clean_value = field.clean(field.to_python(value))
    setattr(config, key, clean_value)
5,354,850
def f_prob(times, lats, lons, members):
    """Probabilistic forecast containing also a member dimension."""
    data = np.random.rand(len(members), len(times), len(lats), len(lons))
    return xr.DataArray(
        data,
        coords=[members, times, lats, lons],
        dims=["member", "time", "lat", "lon"],
        attrs={"source": "test"},
    )
5,354,851
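A short usage sketch for the f_prob entry above (assumes numpy as np, pandas as pd and xarray as xr are imported, as the function body requires); the returned DataArray has one dimension per argument.

import numpy as np
import pandas as pd

times = pd.date_range('2000-01-01', periods=3)
da = f_prob(times, lats=np.arange(2), lons=np.arange(4), members=np.arange(5))
print(da.dims, da.shape)  # ('member', 'time', 'lat', 'lon') (5, 3, 2, 4)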
def dunning_total_by_corpus(m_corpus, f_corpus): """ Goes through two corpora, e.g. corpus of male authors and corpus of female authors runs dunning_individual on all words that are in BOTH corpora returns sorted dictionary of words and their dunning scores shows top 10 and lowest 10 words :param m_corpus: Corpus object :param f_corpus: Corpus object :return: list of tuples (common word, (dunning value, m_corpus_count, f_corpus_count)) >>> from gender_analysis.analysis.dunning import dunning_total_by_corpus >>> from gender_analysis.corpus import Corpus >>> from gender_analysis.common import TEST_DATA_PATH >>> path = TEST_DATA_PATH / 'sample_novels' / 'texts' >>> csv_path = TEST_DATA_PATH / 'sample_novels' / 'sample_novels.csv' >>> c = Corpus(path, csv_path=csv_path) >>> m_corpus = c.filter_by_gender('male') >>> f_corpus = c.filter_by_gender('female') >>> result = dunning_total_by_corpus(m_corpus, f_corpus) >>> print(result[0]) ('she', (-12374.391057010947, 29382, 45907)) """ wordcounter_male = m_corpus.get_wordcount_counter() wordcounter_female = f_corpus.get_wordcount_counter() totalmale_words = 0 totalfemale_words = 0 for male_word in wordcounter_male: totalmale_words += wordcounter_male[male_word] for female_word in wordcounter_female: totalfemale_words += wordcounter_female[female_word] dunning_result = {} for word in wordcounter_male: wordcount_male = wordcounter_male[word] if word in wordcounter_female: wordcount_female = wordcounter_female[word] dunning_word = dunn_individual_word(totalmale_words, totalfemale_words, wordcount_male, wordcount_female) dunning_result[word] = (dunning_word, wordcount_male, wordcount_female) dunning_result = sorted(dunning_result.items(), key=itemgetter(1)) return dunning_result
5,354,852
def generate_normals_dataset(in_zarr, out_directory, variables=None, overwrite=False): """ Compute the normal (day-of-year (DOY) mean) for given variables in the provided Zarr dataset. Creates one xarray Dataset for each DOY, with dimensions "time", "latitude", and "longitude" and coordinates "time", "latitude", "longitude", "doy" with "doy" being a secondary coordinate for the "time" dimension. The "time" dimension is populated with an arbitrary datetime datetime from the year 2000 associated with the DOY. This makes the dataset easier to work with in systems that expect datetimes for a time-related dimension (e.g. THREDDS). Args: in_zarr (str): Path or address to a Zarr dataset containing time-series gridded data with dimensions "time", "latitude", and "longitude". out_directory (str): Path to directory where output will be written. variables (iterable): A list/tuple of variable names on which to compute day-of-year means. If not provided, all variables that are not dimension or coordinate variables will be processed. overwrite (bool): Overwrite existing output files if True. Defaults to False, skipping files/DOYs that already exist. """ out_directory = validate_directory(out_directory) log.info(f'Results will be written to {out_directory}') with xr.open_zarr(in_zarr) as ds: log.debug(f'Given Dataset:\n{ds}') # Use all variable if not provided if not variables: variables = [v for v in ds.variables if v not in ds.dims and v not in ds.coords] log.info(f'Computing day-of-year mean on the following variables: {" & ".join(variables)}') # Use first variable as template DataArray template_da = ds[variables[0]] lats = template_da.latitude.data.copy() lons = template_da.longitude.data.copy() # Create lookup array of dates to assign to each doy # THREDDS needs dates, so year 2000 chosen as an arbitrary leap year # Prepend extra day to beginning so lookup can be 1-indexed instead of zero-indexed datetime_for_ = pd.date_range( start=dt.datetime(year=1999, month=12, day=31), end=dt.datetime(year=2000, month=12, day=31), freq='D' ).to_list() # Track failed doy mean computations failed = {v: [] for v in variables} ref_period_start = ds["time"][0].dt.strftime('%Y-%m-%d').item() ref_period_end = ds["time"][-1].dt.strftime('%Y-%m-%d').item() # Group data by day-of-year doy_groups = template_da.groupby("time.dayofyear").groups doy_group_indices = [(d, i) for d, i in doy_groups.items()] for doy, doy_indices in tqdm(doy_group_indices): # Get arbitrary date for given day-of-year doy_date = datetime_for_[doy] # Build up data_vars arg for dataset data_vars = dict() # Determine output file format out_file = out_directory / \ f'reanalysis-era5-normal-pnt-{doy_date:%Y-%m-%d}.nc' if out_file.is_file(): if not overwrite: log.info(f'\nOutput for doy {doy} found at: {out_file}. 
Skipping...') continue else: out_file.unlink(missing_ok=True) for variable in variables: # Start computation for current DOY log.info(f'\nComputing mean for DOY {doy} for variable {variable}...') comp_start_time = dt.datetime.utcnow() # Compute mean for the current doy for all variables in parallel result = _compute_doy_mean(variable, ds[variable], doy, doy_indices, doy_date, ref_period_start, ref_period_end) if result['success'] is None: log.info(f'An unexpected error occurred while processing {variable} for DOY {doy}') failed[variable].append(str(doy)) continue if result['success'] is False: log.error(f'An error occurred while processing mean for {variable} for DOY {doy}:\n' f'{result["result"]["exception"]}\n' f'{result["result"]["traceback"]}') failed[variable].append(str(doy)) continue result_da = result['result'] log.info(f'Mean computation for DOY {doy} for {variable} took ' f'{humanize.naturaldelta(dt.datetime.utcnow() - comp_start_time)}') data_vars.update({result_da.attrs['long_name']: result_da}) # Create dataset for writing - write one file for each DOY out_ds = xr.Dataset( data_vars=data_vars, attrs={ 'reference_period_start': ref_period_start, 'reference_period_end': ref_period_end, } ) out_ds = out_ds.chunk(chunks={'time': 1, 'latitude': len(lats), 'longitude': len(lons)}) log.info(f'Out DataSet:\n' f'{out_ds}') log.info(f'Writing output: {out_file}') out_ds.to_netcdf(out_file) log.info(f'Processing complete for {variable} for DOY {doy}.') # Log summary of failed processing has_failures = False for variable, failed_doys in failed.items(): if not failed_doys: continue has_failures = True log.warning(f'Processing failed for the following DOYs for {variable}: ' f'{" ".join(failed_doys)}') if has_failures: log.warning(f'Process completed with failures. Please re-run to correct failures and continue.') exit(1)
5,354,853
def send_mail(content, uid, image, subject=None):
    # Subject
    """**If the subject is entirely Chinese or entirely English it must contain
    at least 5 characters, otherwise the server rejects it with error 554
    (SPM: flagged as spam or a virus).**
    """
    if not subject:
        subject = f"fc2热度更新{uid}{content.split(',')[0]}"
    # Body
    # contents = f'{content}\n{image}'
    contents = f"<html><body><h1>{content}</h1><p><img src='cid:0'></p></body></html>"
    # SMTP server address
    smtpserver = 'smtp.office365.com'
    # smtpserver = 'smtp.qq.com'
    # Username (not the email address)
    username = '提醒'
    # 163 mail authorization code
    password = 'zongqian12345'
    # password = 'ihpdbjbsiszgdach'
    msg = MIMEMultipart()
    msg.attach(MIMEText(contents, 'html', 'utf-8'))  # 'utf-8' is required for Chinese text; single-byte characters do not need it
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = sender  # username
    msg['To'] = receiver
    while 1:
        try:
            r = requests.get(image, timeout=8)
            break
        except Exception as e:
            if not 'time' in str(e):
                print("下载img失败", e)
                r = ''
                break
    # Set the attachment MIME type and file name:
    mime = MIMEBase('image', 'jpg', filename=f'{uid}.jpg')
    # Add the required header information:
    mime.add_header('Content-Disposition', 'attachment', filename=f'{uid}.jpg')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment content:
    if r:
        mime.set_payload(r.content)
    else:
        mime.set_payload(b'1')
    # Encode with Base64:
    encoders.encode_base64(mime)
    # Attach to the MIMEMultipart message:
    msg.attach(mime)
    # Server address and port
    smtp = smtplib.SMTP(smtpserver, 587)
    # smtp = smtplib.SMTP_SSL(smtpserver, 465)
    smtp.starttls()
    try:
        smtp.login(sender, password)
        smtp.sendmail(sender, receiver, msg.as_string())
        smtp.quit()
        print('发送邮件成功')
    except Exception as e:
        print("发送邮件失败", e)
5,354,854
def get_apikey() -> str:
    """
    Read and return the value of the environment variable ``LS_API_KEY``.

    :return: The string value of the environment variable or an empty string
        if no such variable could be found.
    """
    api_key = os.environ.get("LS_API_KEY")
    if api_key is None:
        warnings.warn("No token found in environment variable LS_API_KEY.")
    return api_key or ""
5,354,855
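A minimal usage sketch for the get_apikey entry above: with the variable set the key is returned, without it an empty string comes back and a UserWarning is emitted.

import os

os.environ['LS_API_KEY'] = 'my-demo-key'   # hypothetical key for illustration
assert get_apikey() == 'my-demo-key'

del os.environ['LS_API_KEY']
print(repr(get_apikey()))  # '' (a warning about the missing variable is also emitted)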
def test_pdf():
    """
    Test PDF report output.

    :return:
    """
    res = ResMsg()
    report_path = current_app.config.get("REPORT_PATH", "./report")
    file_name = "{}.pdf".format(uuid.uuid4().hex)
    path = os.path.join(report_path, file_name)
    path = pdf_write(path)
    path = path.lstrip(".")
    res.update(data=path)
    return res.data
5,354,856
def vprint(*args, apply=print, **kwargs): """ Prints the variable name, its type and value. :: vprint(5 + 5, sum([1,2])) > 5 + 5 (<class 'int'>): 10 sum([1,2]) (<class 'int'>): 3 """ def printarg(_name, _val) -> str: _string = f'{_name}: {igit_debug.formatting.pformat(_val, types=True)}' apply(_string) return _string strings = [] if args: currframe = inspect.currentframe() outer = inspect.getouterframes(currframe) frameinfo = outer[1] ctx = frameinfo.code_context[0].strip() argnames = ctx[ctx.find('(') + 1:-1].split(', ') if len(argnames) != len(args) + len(kwargs): print(f"Too complex statement, try breaking it down to variables or eliminating whitespace", # f'len(argnames): {len(argnames)}', f'len(args): {len(args)}', f'len(kwargs): {len(kwargs)}', # vprint(ctx, argnames, args, kwargs) ) # return for i, val in enumerate(args): try: name = argnames[i].strip() except IndexError: continue # TODO: break? strings.append(printarg(name, val)) for name, val in kwargs.items(): strings.append(printarg(name, val)) # return strings
5,354,857
def get_int(prompt: Optional[str] = None,
            min_value: Optional[int] = None,
            max_value: Optional[int] = None,
            condition: Optional[Callable[[int], bool]] = None,
            default: Optional[int] = None) -> int:
    """Gets an int from the command line.

    :param prompt: Input prompt.
    :param min_value: Minimum value of the parsed int.
    :param max_value: Maximum value of the parsed int.
    :param condition: Condition the int must match.
    :param default: Default value used if no characters are typed.
    :return: Input int.
    """
    input_int = None
    input_str = None
    while input_int is None:
        try:
            input_str = input(_prompt_from_message(prompt, default=default)).strip()
            if default is not None and len(input_str) == 0:
                input_str = default
            input_int = int(input_str)
            if (min_value is not None and input_int < min_value) or \
                    (max_value is not None and input_int > max_value) or \
                    (condition is not None and not condition(input_int)):
                input_int = None
                raise ValueError()
        except ValueError:
            _print_invalid_value(input_str)
    return input_int
5,354,858
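# A usage sketch for get_int above; the prompt text, bounds, and condition are
# illustrative assumptions rather than values taken from the original project.
count = get_int(
    prompt="How many samples?",
    min_value=1,
    max_value=100,
    condition=lambda n: n % 2 == 0,  # keep re-prompting until an even number is typed
    default=10,                      # used when the user just presses Enter
)
print(f"Requested {count} samples")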
def make_stream_callback(observer, raw, frame_size, start, stop):
    """ Builds a callback function for stream playing. The observer is an
        object which implements methods 'observer.set_playing_region(b,e)'
        and 'observer.set_playing_end(e)'. raw is the wave data in a str object.
        frame_size is the number of bytes per sample times the number of channels
        per frame. start and stop indicate which slice of raw will be played. """
    start_ref = [ start ]
    def callback(in_data, frame_count, time_info, status):
        start = start_ref[0]
        last = min(stop, start + frame_count*frame_size)
        data = raw[start:last]
        start_ref[0] = last
        if last == stop:
            observer.set_playing_end(last)
        else:
            observer.set_playing_region(start, last)
        return (data, pyaudio.paContinue)
    return callback
5,354,859
def predictCNN(segments, artifacts, device:torch.device = torch.device("cpu")): """ Perform model predictions on unseen data :param segments: list of segments (paragraphs) :param artifacts: run artifacts to evaluate :param device: torch device :return category predictions """ # Retrieve artifacts params = artifacts["params"] label_encoder = artifacts["label_encoder"] tokenizer = artifacts["tokenizer"] model = artifacts["model"] # Prepare dataset into model readable format preprocessed_segments = [preprocess.cleanText(segment, lower=params.lower, stem=params.stem) for segment in segments] X = np.array(tokenizer.texts_to_sequences(preprocessed_segments), dtype="object") y_blank = np.zeros((len(X), len(label_encoder))) dataset = CNNDataset(X=X, y=y_blank, max_filter_size=int(params.max_filter_size)) dataloader = dataset.create_dataloader(batch_size=int(params.batch_size)) # Get model predictions trainer = Trainer(model=model, device=device) _, y_prob = trainer.predict_step(dataloader) y_pred = [np.where(prob >= float(params.threshold), 1, 0) for prob in y_prob] categories = label_encoder.decode(y_pred) predictions = [{"input_text": segments[i], "preprocessed_text": preprocessed_segments[i], "predicted_tags": categories[i]} for i in range(len(categories))] return predictions
5,354,860
def load(dataset_directory: str, corpus_path: str, validate: bool = False):
    """
    Given the path to a directory containing one or more dataset folders (each holding an h5ad file)
    and the corpus (TileDB group) path, call the h5ad loading function on every dataset,
    loading/concatenating them together under the corpus, then consolidate the underlying arrays.
    """
    with tiledb.scope_ctx(create_ctx()):
        dataset_count = len(os.listdir(dataset_directory))
        i = 0
        for dataset in os.listdir(dataset_directory):
            i += 1
            logger.info(f"Processing dataset {i} of {dataset_count}")
            h5ad_file_path = f"{dataset_directory}/{dataset}/local.h5ad"
            load_h5ad(
                h5ad_file_path, corpus_path, validate
            )  # TODO Can this be parallelized? need to be careful handling global indexes but tiledb has a lock I think
            gc.collect()

        logger.info("all loaded, now consolidating.")
        for arr_name in [f"{corpus_path}/{name}" for name in ["obs", "var", INTEGRATED_ARRAY_NAME]]:
            tiledb.consolidate(arr_name)
            tiledb.vacuum(arr_name)
5,354,861
def transform_url(url):
    """Normalizes url to 'git@github.com:{username}/{repo}' and also
    returns username and repository's name."""
    username, repo = re.search(r'[/:](?P<username>[A-Za-z0-9-]+)/(?P<repo>[^/]*)', url).groups()
    if url.startswith('git@'):
        return url, username, repo
    return 'git@github.com:{username}/{repo}'.format(**locals()), username, repo
5,354,862
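# Illustrative calls to transform_url above; the user and repository names are invented,
# and github.com is assumed as the host in the normalized SSH form.
url, user, repo = transform_url("https://github.com/example-user/example-repo")
# -> ("git@github.com:example-user/example-repo", "example-user", "example-repo")

url, user, repo = transform_url("git@github.com:example-user/example-repo.git")
# SSH-style URLs are returned unchanged:
# -> ("git@github.com:example-user/example-repo.git", "example-user", "example-repo.git")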
def prettyprint(data: dict, command: str, modifier: Optional[str] = '') -> str: """ Prettyprint the JSON data we get back from the API """ output = '' # A few commands need a little special treatment if command == 'job': command = 'jobs' if 'data' in data and 'jobs' in data['data']: output = prettyprint_jobs(data, command) elif 'data' in data and 'files' in data['data']: output = prettyprint_firmware(data, command) elif 'job_id' in data: output = prettyprint_job(data, command) elif 'data' in data and 'groups' in data['data']: output = prettyprint_groups(data, 'groups') elif 'data' in data and 'version' in data['data']: output = prettyprint_version(data, 'version') elif 'data' in data and command == 'device': output = prettyprint_device(data) elif 'data' in data and command in data['data']: output = prettyprint_command(data, command) elif 'status' in data and data['status'] == 'error': output = prettyprint_error(data) else: output = prettyprint_other(data) if modifier != '': output = prettyprint_modifier(output, modifier) return output
5,354,863
def decompress(src, dest, verbose, verify=False):
    """
    Decompresses a file from src to dest
    @param src     Path to the compressed file
    @param dest    Path to the decompressed file
    @param verbose Whether to print verbose output
    @param verify  Bool value indicating if the decompressed file should be checked if it exists
    @return None
    """
    cmd = get_decompress_cmd(src, dest, verbose)
    if verbose:
        printv('Decompressing ' + src + ' -> ' + dest)
        printv('Cmd: ' + ' '.join(cmd))
    subprocess.run(
        cmd,
        check = True,
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE,
        env = { 'GZIP':'-f' },
    )
    if verify:
        abort_if(not os.path.isfile(dest), 'Dest %s was not created' % dest)
5,354,864
def get_address(get_address: GetAddressData, include_elster_responses: bool = False): """ The address data of the given idnr is requested at Elster and returned. Be aware, that you need a permission (aka an activated unlock_code) to query a person's data. :param get_address: the JSON input data for the request :param include_elster_responses: query parameter which indicates whether the ERiC/Server response are returned """ # For now, we do not allow data requests as we cannot guarantee that Elster already has the relevant data gathered raise NotImplementedError()
5,354,865
def kmor(X: np.array, k: int, y: float = 3, nc0: float = 0.1, max_iteration: int = 100, gamma: float = 10 ** -6):
    """K-means clustering with outlier removal

    Parameters
    ----------
    X
        Your data.
    k
        Number of clusters.
    y
        Parameter for outlier detection. Increase this to make outlier removal subtle.
    nc0
        Maximum percentage of your data that can be assigned to outlier cluster.
    max_iteration
        Maximum number of iterations.
    gamma
        Used to check the convergence.

    Returns
    -------
    numpy.array
        Numpy array that contains the assigned cluster of each data point (0 to k, the cluster k is the outlier cluster)
    """
    n = X.shape[0]
    n0 = int(nc0 * X.shape[0])

    Z = X[np.random.choice(n, k)]

    def calculate_dd(U, Z):
        return np.linalg.norm(X - Z[U], axis=1) ** 2

    def calculate_D(outliers, dd):
        factor = y / (n - outliers.size)
        return factor * np.sum(np.delete(dd, outliers))

    def calculate_U(X):
        def closest(p):
            return np.argmin(np.linalg.norm(Z - p, axis=1))

        return np.apply_along_axis(closest, 1, X)

    outliers = np.array([], dtype=int)  # indices of the points currently treated as outliers
    U = calculate_U(X)
    s = 0
    p = 0
    while True:
        # Update U (Theorem 1)
        dd = calculate_dd(U, Z)
        D = calculate_D(outliers, dd)
        dd2 = dd[dd > D]
        outliers = np.arange(n)[dd > D][dd2.argsort()[::-1]]
        outliers = outliers[:n0]
        U = calculate_U(X)

        # Update Z (Theorem 3)
        # Mark data points (by index) that are currently flagged as outliers
        is_outlier = np.isin(np.arange(n), outliers)

        def mean_group(i):
            x = X[np.logical_and(U == i, ~is_outlier)]
            # Empty group
            if x.size == 0:
                x = X[np.random.choice(n, 1)]
            return x.mean(axis=0)

        Z = np.array([mean_group(i) for i in range(k)])

        # Update P
        dd = calculate_dd(U, Z)
        D = calculate_D(outliers, dd)
        if outliers.size == 0:
            p1 = np.sum(dd)
        else:
            # Sum of distances over the non-outlier points plus the outlier penalty
            p1 = np.sum(np.delete(dd, outliers)) + D * outliers.size

        # Exit condition
        s += 1
        if abs(p1 - p) < gamma or s > max_iteration:
            break
        p = p1

    print("s:", s, "p:", p)

    U[outliers] = k
    return U
5,354,866
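# A usage sketch for kmor above on synthetic data; the cluster count, outlier fraction,
# and blob layout are arbitrary choices made only for illustration.
import numpy as np

rng = np.random.default_rng(0)
blobs = np.vstack([
    rng.normal(loc=0.0, scale=0.5, size=(50, 2)),   # cluster around the origin
    rng.normal(loc=5.0, scale=0.5, size=(50, 2)),   # cluster around (5, 5)
])
far_points = rng.uniform(low=20.0, high=30.0, size=(5, 2))  # should end up in the outlier cluster
X = np.vstack([blobs, far_points])

labels = kmor(X, k=2, nc0=0.1)
# Points labelled 2 (== k) were flagged as outliers.
print(np.unique(labels, return_counts=True))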
def compile_pipeline(pipeline_source: str, pipeline_name: str) -> str: """Read in the generated python script and compile it to a KFP package.""" # create a tmp folder tmp_dir = tempfile.mkdtemp() # copy generated script to temp dir copyfile(pipeline_source, tmp_dir + '/' + "pipeline_code.py") path = tmp_dir + '/' + 'pipeline_code.py' spec = importlib.util.spec_from_file_location(tmp_dir.split('/')[-1], path) foo = importlib.util.module_from_spec(spec) spec.loader.exec_module(foo) # path to generated pipeline package pipeline_package = os.path.join(os.path.dirname(pipeline_source), pipeline_name + '.pipeline.yaml') Compiler().compile(foo.auto_generated_pipeline, pipeline_package) return pipeline_package
5,354,867
def turnout_div(turnout_main, servo, gpo_provider): """Create a turnout set to the diverging route""" turnout_main.set_route(True) # Check that the route was set to the diverging route assert(servo.get_angle() == ANGLE_DIV) assert(gpo_provider.is_enabled()) return turnout_main
5,354,868
def num_jewels(J: str, S: str) -> int: """ Time complexity: O(n + m) Space complexity: O(n) """ jewels = set(J) return sum(stone in jewels for stone in S)
5,354,869
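# Quick checks for num_jewels above, using the classic jewels-and-stones example.
assert num_jewels("aA", "aAAbbbb") == 3
assert num_jewels("z", "ZZ") == 0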
def internet(host="8.8.8.8", port=53, timeout=3): """ Host: 8.8.8.8 (google-public-dns-a.google.com) OpenPort: 53/tcp Service: domain (DNS/TCP) """ try: socket.setdefaulttimeout(timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) logger.info('Internet is there!!') return True except Exception as ex: logger.warning('Internet is gone!!') return False
5,354,870
def run():
    """
    Convert the inference JSON dataset to binary files: for each sentence, one .bin
    file is written per field (input ids, input mask, token types, labels).
    """
    args = parse_args()
    _params = sstcfg
    import_modules()
    dataset_reader_params_dict = _params.get("dataset_reader")
    dataset_reader = dataset_reader_from_params(dataset_reader_params_dict)
    train_wrapper = dataset_reader.dev_reader.data_generator()
    ids_path = os.path.join(args.result_path, "00_data")
    mask_path = os.path.join(args.result_path, "02_data")
    token_path = os.path.join(args.result_path, "01_data")
    label_path = os.path.join(args.result_path, "03_data")
    os.makedirs(ids_path)
    os.makedirs(mask_path)
    os.makedirs(token_path)
    os.makedirs(label_path)
    idx = 0
    for i in train_wrapper():
        input_ids = np.array(i[2], dtype=np.int32)
        input_mask = np.array(i[5], dtype=np.int32)
        token_type_id = np.array(i[3], dtype=np.int32)
        label_ids = np.array(i[1], dtype=np.int32)
        file_name = "senta_batch_1_" + str(idx) + ".bin"
        ids_file_path = os.path.join(ids_path, file_name)
        input_ids.tofile(ids_file_path)

        mask_file_path = os.path.join(mask_path, file_name)
        input_mask.tofile(mask_file_path)

        token_file_path = os.path.join(token_path, file_name)
        token_type_id.tofile(token_file_path)

        label_file_path = os.path.join(label_path, file_name)
        label_ids.tofile(label_file_path)

        idx += 1
    print("=" * 20, "export bin files finished", "=" * 20)
5,354,871
def map_family_situation(code): """Maps French family situation""" status = FamilySituation mapping = { "M": status.MARRIED.value, "C": status.SINGLE.value, "V": status.WIDOWED.value, "D": status.DIVORCED.value, "O": status.PACSED.value, } if code in mapping.keys(): return mapping[code] else: logging.warning("In {}, args {} not recognised".format("family_situation", code)) return code
5,354,872
def transform(x, channels, img_shape, kernel_size=7, threshold=1e-4):
    """
    Apply LeCun local contrast normalization to the selected channels.

    Parameters
    ----------
    x : WRITEME
        data with axis [b, 0, 1, c]
    """
    for i in channels:
        assert isinstance(i, int)
        assert i >= 0 and i < x.shape[3]
        x[:, :, :, i] = lecun_lcn(x[:, :, :, i], img_shape, kernel_size, threshold)

    return x
5,354,873
def test_error(): """Expect raising SystemExit""" with pytest.raises(SystemExit): error('Error')
5,354,874
def delete_files(files=[]): """This decorator deletes files before and after a function. This is very useful for installation procedures. """ def my_decorator(func): @functools.wraps(func) def function_that_runs_func(self, *args, **kwargs): # Inside the decorator # Delete the files - prob don't exist yet delete_paths(files) # Run the function stuff = func(self, *args, **kwargs) # Delete the files if they do exist delete_paths(files) return stuff return function_that_runs_func return my_decorator
5,354,875
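# A sketch of how the delete_files decorator above might be used; the class, method,
# and file paths are hypothetical, and delete_paths is assumed to accept a list of paths.
class Installer:
    @delete_files(files=["/tmp/example-download.tar.gz", "/tmp/example-build.log"])
    def install(self, version):
        # The listed files are removed before this body runs and again after it returns.
        print(f"installing version {version}")
        return True

Installer().install("1.2.3")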
async def update_role(guild_id: int, role: RoleModel, db: AsyncSession = Depends(get_db_session)): """Update a role by id""" await crud.roles.update_role(db=db, role=role)
5,354,876
def get_town_table(screenshot_dir): """Generate python code for town table Its format is table[town_name] = (nearby town1, nearby town2...nearby town5) The length of tuple may be different depends on town. Arguments: screenshot_dir (str): Directory which have town_name directory and label. Return: python code style string (str) """ result = "TOWNS_TABLE = {}\n" for di in sorted(os.listdir(screenshot_dir)): dir_path = screenshot_dir + "/" + di if not os.path.isdir(dir_path): continue for f in os.listdir(dir_path): if f.lower().endswith(".txt"): result += "TOWNS_TABLE[(" lines = open(dir_path + "/" + f).read().splitlines() for i in range(3, len(lines), 3): result += "'%s', " % lines[i] result = result[:-2] + ")]\\" result += "\n= '%s'\n" % di break return result
5,354,877
def pip_install(pkg, upgrade=True): """ Call ``pip install`` for a given package. If ``upgrade==True``, call with ``--upgrade`` key (upgrade current version if it is already installed). """ if upgrade: subprocess.call([sys.executable, "-m", "pip", "install", "--upgrade", pkg]) else: subprocess.call([sys.executable, "-m", "pip", "install", pkg])
5,354,878
def add_note(front, back, tag, model, deck, note_id=None): """ Add note with `front` and `back` to `deck` using `model`. If `deck` doesn't exist, it is created. If `model` doesn't exist, nothing is done. If `note_id` is passed, it is used as the note_id """ model = mw.col.models.byName(model) if model: mw.col.decks.current()['mid'] = model['id'] else: return None # Creates or reuses deck with name passed using `deck` did = mw.col.decks.id(deck) deck = mw.col.decks.get(did) note = mw.col.newNote() note.model()['did'] = did note.fields[0] = front note.fields[1] = back if note_id: note.id = note_id note.addTag(tag) mw.col.addNote(note) mw.col.save() return note.id
5,354,879
def main(): """ Main method for the test run simulation app :return: None """ # init steps timing event_period_s = 3 total = [20, 15, 30, 10] # create status object table status = [] # fill up test run start info for i in range(4): status.append(RunStatus(BLYNK_AUTH, i)) status[i].start(total[i], "Run {}".format(i)) # TODO random progress # Generate test steps for actual in range(max(total)): print("Loop {}".format(actual)) time.sleep(event_period_s) for i in range(4): if actual < status[i].test_run.total: if random.choice(CHANCE_SUCCESS): status[i].add_succeed() elif random.choice(CHANCE_FAILED): status[i].add_failed() else: status[i].add_blocked() time.sleep(event_period_s) for i in range(4): status[i].stop()
5,354,880
def indices_to_one_hot(data, nb_classes): #separate: embedding """Convert an iterable of indices to one-hot encoded labels.""" targets = np.array(data).reshape(-1) return np.eye(nb_classes)[targets]
5,354,881
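# Example call for indices_to_one_hot above.
import numpy as np

one_hot = indices_to_one_hot([0, 2, 1], nb_classes=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])
print(one_hot)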
def translateToceroZcoord(moleculeRDkit): """ Translate the molecule to put the first atom in the origin of the coordinates Parameters ---------- moleculeRDkit : RDkit molecule An RDkit molecule Returns ------- List List with the shift value applied to X, Y, Z """ from rdkit.Chem import rdMolTransforms conf = moleculeRDkit.GetConformer() # avoid first atom overlap with dummy 3 if abs(conf.GetAtomPosition(0).x-1.0)<1e-3 and abs(conf.GetAtomPosition(0).y-1.0)<1e-3 and abs(conf.GetAtomPosition(0).z-0.0)<1e-3: shiftX = conf.GetAtomPosition(0).x - 1.0 shiftY = conf.GetAtomPosition(0).y - 1.0 shiftZ = conf.GetAtomPosition(0).z translationMatrix = np.array( [[1, 0, 0, -shiftX], [0, 1, 0, -shiftY], [0, 0, 1, -shiftZ], [0, 0, 0, 1]], dtype=np.double) rdMolTransforms.TransformConformer(conf, translationMatrix) else: shiftX = 0.0 shiftY = 0.0 shiftZ = 0.0 return [shiftX, shiftY, shiftZ]
5,354,882
def disable_directory(DirectoryArn=None): """ Disables the specified directory. Disabled directories cannot be read or written to. Only enabled directories can be disabled. Disabled directories may be reenabled. See also: AWS API Documentation Exceptions :example: response = client.disable_directory( DirectoryArn='string' ) :type DirectoryArn: string :param DirectoryArn: [REQUIRED]\nThe ARN of the directory to disable.\n :rtype: dict ReturnsResponse Syntax{ 'DirectoryArn': 'string' } Response Structure (dict) -- DirectoryArn (string) --The ARN of the directory that has been disabled. Exceptions CloudDirectory.Client.exceptions.ResourceNotFoundException CloudDirectory.Client.exceptions.DirectoryDeletedException CloudDirectory.Client.exceptions.InternalServiceException CloudDirectory.Client.exceptions.ValidationException CloudDirectory.Client.exceptions.LimitExceededException CloudDirectory.Client.exceptions.AccessDeniedException CloudDirectory.Client.exceptions.RetryableConflictException CloudDirectory.Client.exceptions.InvalidArnException :return: { 'DirectoryArn': 'string' } """ pass
5,354,883
def standardize(mri): """ Standardize mean and standard deviation of each channel and z_dimension slice to mean 0 and standard deviation 1. Note: setting the type of the input mri to np.float16 beforehand causes issues, set it afterwards. Args: mri (np.array): input mri, shape (dim_x, dim_y, dim_z, num_channels) Returns: standardized_mri (np.array): standardized version of input mri """ standardized_mri = np.zeros(mri.shape) # Iterate over channels for c in range(mri.shape[3]): # Iterate over the `z` depth dimension for z in range(mri.shape[2]): # Get a slice of the mri at channel c and z-th dimension mri_slice = mri[:, :, z, c] # Subtract the mean from mri_slice centered = mri_slice - np.mean(mri_slice) # Divide by the standard deviation (only if it is different from zero) if np.std(centered) != 0: centered_scaled = centered / np.std(centered) # Update the slice of standardized mri with the centered and scaled mri standardized_mri[:, :, z, c] = centered_scaled return standardized_mri
5,354,884
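# A small sketch exercising standardize above on a random volume; the dimensions are arbitrary.
import numpy as np

mri = np.random.rand(16, 16, 4, 2) * 100.0   # fake volume: 16x16 in-plane, 4 slices, 2 channels
normalized = standardize(mri)
# Each (slice, channel) plane should now be roughly zero-mean with unit standard deviation.
print(normalized[:, :, 0, 0].mean(), normalized[:, :, 0, 0].std())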
def current_floquet_kets(eigensystem, time): """ Get the Floquet basis kets at a given time. These are the |psi_j(t)> = exp(-i energy[j] t) |phi_j(t)>, using the notation in Marcel's thesis, equation (1.13). """ weights = np.exp(time * eigensystem.abstract_ket_coefficients) weights = weights.reshape((1, -1, 1)) return np.sum(weights * eigensystem.k_eigenvectors, axis=1)
5,354,885
def summary():
    """ DB summary stats """
    cur = get_cur()
    res = []
    try:
        cur.execute('select count(study_id) as num_studies from study')
        res = cur.fetchone()
    except Exception:
        dbh.rollback()
    finally:
        cur.close()
    if res:
        return Summary(num_studies=res['num_studies'])
    else:
        return []
5,354,886
def get_clockwork_conformations(molobj, torsions, resolution, atoms=None, debug=False, timings=False): """ Get all conformation for specific cost cost defined from torsions and resolution """ n_torsions = len(torsions) if atoms is None: atoms, xyz = cheminfo.molobj_to_xyz(molobj, atom_type="int") del xyz combinations = clockwork.generate_clockwork_combinations(resolution, n_torsions) # Collect energies and coordinates end_energies = [] end_coordinates = [] end_representations = [] first = True for resolutions in combinations: time_start = time.time() # Get all conformations c_energies, c_coordinates, c_states = get_conformations(molobj, torsions, resolutions) N = len(c_energies) # Filter unconverged success = np.argwhere(c_states == 0) success = success.flatten() c_energies = c_energies[success] c_coordinates = c_coordinates[success] N2 = len(c_energies) # Calculate representations c_representations = [sim.get_representation(atoms, coordinates) for coordinates in c_coordinates] c_representations = np.asarray(c_representations) # Clean all new conformers for energies and similarity idxs = clean_representations(atoms, c_energies, c_representations) c_energies = c_energies[idxs] c_coordinates = c_coordinates[idxs] c_representations = c_representations[idxs] if first: first = False end_energies += list(c_energies) end_coordinates += list(c_coordinates) end_representations += list(c_representations) continue # Asymmetrically add new conformers idxs = merge.merge_asymmetric(atoms, c_energies, end_energies, c_representations, end_representations) # Add new unique conformation to return collection for i, idx in enumerate(idxs): # if conformation already exists, continue if len(idx) > 0: continue # Add new unique conformation to collection end_energies.append(c_energies[i]) end_coordinates.append(c_coordinates[i]) end_representations.append(c_representations[i]) time_end = time.time() if timings: timing = time_end - time_start print("res time {:8.2f} cnf/sec - {:8.2f} tot sec".format(N/timing, timing)) continue return end_energies, end_coordinates
5,354,887
def rotate_affine(img, rot=None): """Rewrite the affine of a spatial image.""" if rot is None: return img img = nb.as_closest_canonical(img) affine = np.eye(4) affine[:3] = rot @ img.affine[:3] return img.__class__(img.dataobj, affine, img.header)
5,354,888
async def announce(ctx, destination, counter: Counter, /, **kwargs): """Announce the current count of a `Counter` somewhere.""" announcement = counter.get_announcement(**kwargs) await ctx.module_message(destination, announcement) if (msg := SPECIAL_NUMBERS.get(counter.count)) is not None: await asyncio.sleep(1) action = msg.startswith('*') and msg.endswith('*') await ctx.module_message(destination, msg, action)
5,354,889
def load_default(name): """Load the default session. Args: name: The name of the session to load, or None to read state file. """ if name is None and session_manager.exists('_autosave'): name = '_autosave' elif name is None: try: name = configfiles.state['general']['session'] except KeyError: # No session given as argument and none in the session file -> # start without loading a session return try: session_manager.load(name) except SessionNotFoundError: message.error("Session {} not found!".format(name)) except SessionError as e: message.error("Failed to load session {}: {}".format(name, e)) try: del configfiles.state['general']['session'] except KeyError: pass # If this was a _restart session, delete it. if name == '_restart': session_manager.delete('_restart')
5,354,890
def test_grad_hermite_multidimensional_numba_vs_finite_differences(tol): """Tests the gradients of hermite_numba. The gradients of parameters are tested by finite differences""" cutoff = 4 R = np.random.rand(cutoff, cutoff) + 1j * np.random.rand(cutoff, cutoff) R += R.T y = np.random.rand(cutoff) + 1j * np.random.rand(cutoff) C = 0.5 gate = hermite_multidimensional_numba(R, cutoff, y, C = C, dtype=np.complex128) grad_C, grad_R, grad_y = grad_hermite_multidimensional_numba( gate, R, cutoff, y, C = C, dtype=np.complex128 ) delta_plus = 0.00001 + 1j * 0.00001 expected_grad_C = ( hermite_multidimensional_numba(R, cutoff, y, C = C + delta_plus) - hermite_multidimensional_numba(R, cutoff, y, C = C - delta_plus) ) / (2 * delta_plus) assert np.allclose(grad_C, expected_grad_C, atol=tol, rtol=0) expected_grad_y = ( hermite_multidimensional_numba(R, cutoff, y + delta_plus, C = C) - hermite_multidimensional_numba(R, cutoff, y - delta_plus, C = C) ) / (2 * delta_plus) assert np.allclose(grad_y, expected_grad_y, atol=tol, rtol=0) expected_grad_R = ( hermite_multidimensional_numba(R + delta_plus, cutoff, y, C = C) - hermite_multidimensional_numba(R - delta_plus, cutoff, y, C = C) ) / (2 * delta_plus) assert np.allclose(grad_R, expected_grad_R, atol=tol, rtol=0)
5,354,891
def decode_file_example():
    """
    Example for decoding a file
    :return:
    """
    with open("ais.exploratorium.edu", "r") as file:
        for aline in file:
            try:
                msg = aline.rstrip("\n")
                ais_data = pyAISm.decod_ais(msg)          # Returns a dictionary
                ais_format = pyAISm.format_ais(ais_data)  # A more human-readable dictionary
                print(ais_format)                         # Print the decoded, formatted message
            except pyAISm.UnrecognizedNMEAMessageError as e:
                print(e)
            except pyAISm.BadChecksumError as e:
                print(e)
            except Exception as e:
                print(e)
        print('End of file')
5,354,892
def validate_ttl(options):
    """
    Check with Vault if the ttl is valid.
    :param options: Lemur option dictionary
    :return: 1. Boolean if the ttl is valid or not.
             2. the ttl in hours.
    """
    if 'validity_end' in options and 'validity_start' in options:
        ttl = math.floor(abs(options['validity_end'] - options['validity_start']).total_seconds() / 3600)
    elif 'validity_years' in options:
        ttl = options['validity_years'] * 365 * 24
    else:
        ttl = 0
    headers = {'X-Vault-Token': vault_auth.get_token()}
    url = '{}/roles/{}'.format(current_app.config.get('VAULT_PKI_URL'), options['authority'].name)
    res, resp = vault_read_request(url, headers)
    if res:
        max_ttl = resp.json()['data']['max_ttl']
        text_file = open("max_ttl.txt", "wt")
        text_file.write(str(max_ttl))
        text_file.close()
        if int(max_ttl) < ttl:
            current_app.logger.info('Certificate TTL is above max ttl - ' + str(max_ttl))
            return True, ttl
        else:
            return True, ttl
    else:
        current_app.logger.info('Vault: Failed to get Vault max TTL')
        raise Exception('Vault: ' + str(resp))
5,354,893
def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """
    Args:
        pretrained(bool): whether to load pre-trained parameters
        progress(bool): whether to show a progress bar while downloading the data
    Return:
        the VGG model
    """
    return _vgg("vgg16", "D", False, pretrained, progress, **kwargs)
5,354,894
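# A usage sketch for the vgg16 factory above, assuming the torchvision-style _vgg/VGG
# internals it wraps; no pretrained weights are loaded here.
import torch

model = vgg16(pretrained=False)
model.eval()

dummy = torch.randn(1, 3, 224, 224)   # one RGB image at the usual VGG input size
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # expected: torch.Size([1, 1000]) with the default classifier head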
def cy_gate(N=None, control=0, target=1): """Controlled Y gate. Returns ------- result : :class:`qutip.Qobj` Quantum object for operator describing the rotation. """ if (control == 1 and target == 0) and N is None: N = 2 if N is not None: return gate_expand_2toN(cy_gate(), N, control, target) return Qobj([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]], dims=[[2, 2], [2, 2]])
5,354,895
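# Usage sketch for cy_gate above, assuming the QuTiP conventions of its helpers
# (Qobj, gate_expand_2toN).
CY = cy_gate()                           # plain two-qubit controlled-Y
print(CY.dims)                           # [[2, 2], [2, 2]]

CY3 = cy_gate(N=3, control=0, target=2)  # embedded in a 3-qubit register
print(CY3.dims)                          # [[2, 2, 2], [2, 2, 2]]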
def sqf(f, *gens, **args): """ Compute square-free factorization of ``f``. **Examples** >>> from sympy import sqf >>> from sympy.abc import x >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16) 2*(1 + x)**2*(2 + x)**3 """ return _generic_factor(f, gens, args, method='sqf')
5,354,896
def update_options_dpd2(dpd1_val):
    """
    Updates the contents of the second dropdown menu based on the value of the first dropdown.
    :param dpd1_val: str, first dropdown value
    :return: list of dictionaries, labels and values
    """
    all_options = [
        strings.CITY_GDANSK,
        strings.CITY_GDYNIA,
        strings.CITY_KALINGRAD,
        strings.CITY_KLAIPEDA,
        strings.CITY_STPETERBURG,
    ]
    all_options.remove(dpd1_val)
    options = [{"label": opt, "value": opt} for opt in all_options]
    return options
5,354,897
def compute_min_paths_from_monitors(csv_file_path, delimiter='\t', origin_as=PEERING_ORIGIN): """ Inputs: csv_file_path, delimiter : csv file containing entries with the following format: |collector|monitor|as_path, and the delimiter used origin_as: the ASN you want to use as the terminal one for the as_path length computation Output: A dictionary that contains for each monitor found in the given csv file, the minimum length path and its length. """ monitor_routes = {} # contains the minimum length found for each route monitor # key:monitor(string), value: (minimum as_path length(integer), # the minimum length as_path(list of positive integers)) with open(csv_file_path) as csv_file: csv_reader = csv.reader(csv_file, delimiter=delimiter) row_count = 0 for row in csv_reader: row_count += 1 monitor = row[1] # AS-path prep removing prepending and bgp poisoning as_path_list = AS_path().make_list(row[2]) # as_path(string) -> as_path (list of positive integers) as_path_rem_prepend = AS_path().remove_prependings(as_path_list) as_path_cleared = AS_path().remove_loops(as_path_rem_prepend) as_path_length = AS_path().count_length(as_path_cleared, origin_as) if monitor in monitor_routes.keys(): if monitor_routes[monitor][0] > as_path_length: monitor_routes[monitor] = (as_path_length, as_path_cleared) else: monitor_routes[monitor] = (as_path_length, as_path_cleared) return monitor_routes
5,354,898
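# A sketch of the expected input and a call to compute_min_paths_from_monitors above;
# the collector/monitor names and AS numbers are invented, and the AS_path helper is
# assumed to parse the space-separated as_path column.
#
# monitors.csv (tab-separated): collector<TAB>monitor<TAB>as_path, e.g.
#   rrc00    monitor-1    64500 64501 64501 64502
#   rrc00    monitor-2    64510 64511 64512 64513 64502
min_paths = compute_min_paths_from_monitors("monitors.csv", delimiter="\t")
for monitor, (length, as_path) in min_paths.items():
    print(monitor, length, as_path)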
def _blanking_rule_ftld_or3a(rule): """ See _blanking_rule_ftld_or2a for rules """ if rule == 'Blank if Question 1 FTDIDIAG = 0 (No)': return lambda packet: packet['FTDIDIAG'] == 0 elif rule == 'Blank if Question 3 FTDFDGPE = 0 (No)': return lambda packet: packet['FTDFDGPE'] == 0 elif rule == 'Blank if Question 3a FTDFDGFh = 0 (No) or 9 (Unknown)': return lambda packet: packet['FTDFDGFh'] in (0, 9) elif rule == 'Blank if Question 3a11, FTDFDGOA, ne 1 (Yes)': return lambda packet: packet['FTDFDGOA'] != 1 else: return lambda packet: False
5,354,899