Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def is_checkpointing() -> bool:
    """Whether the current forward propagation is under checkpointing.

    Returns:
        bool: :data:`True` if it's under checkpointing.
    """
    return thread_local.is_checkpointing
2779c059622bfe15586f69e4c0cfeb3bbf16a754
6,281
import json def create_tile_assets_profile(iam, profile_name, locations): """ Creates a profile (and corresponding role) with read and write access to the tile assets bucket. """ profile = iam.create_instance_profile( InstanceProfileName=profile_name, Path='/', ) iam.create_role( RoleName=profile_name, AssumeRolePolicyDocument=json.dumps( assume_role_policy_document('ec2.amazonaws.com')), ) iam.add_role_to_instance_profile( InstanceProfileName=profile_name, RoleName=profile_name, ) assets_path = locations.assets.name + '/' + locations.assets.prefix + '/*' policy = { "Version": "2012-10-17", "Statement": [ { "Sid": "VisualEditor0", "Effect": "Allow", "Action": [ "s3:PutObject", "s3:GetObject", "s3:DeleteObject" ], "Resource": 'arn:aws:s3:::' + assets_path, }, { "Sid": "VisualEditor1", "Effect": "Allow", "Action": "s3:ListBucket", "Resource": 'arn:aws:s3:::' + locations.assets.name, } ] } iam.put_role_policy( RoleName=profile_name, PolicyName='AllowReadWriteAccessToTilesAssetsBucket', PolicyDocument=json.dumps(policy), ) return profile['InstanceProfile']
e1e9bfb9405b4558fbf9972dfab67bd22a9f0189
6,282
import scipy def noise_filter(rgb_array, coef=8, read_noise=2, shot_noise=246): """ Apply bilateral noise filter to RGB image""" h, w, _ = rgb_array.shape luma_img = rgb_array[:, :, 0] + rgb_array[:, :, 1] + rgb_array[:, :, 2] average = scipy.ndimage.filters.uniform_filter(luma_img, 5, mode='mirror') sigma_map = average * shot_noise + read_noise del average sigma_map[sigma_map < 1] = 1 sy, sx = sigma_map.strides sigma_tile = as_strided(sigma_map, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5)) sigma_tile = sigma_tile[2:h-2, 2:w-2, :, :] del sigma_map sy, sx = luma_img.strides luma_tile = as_strided(luma_img, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5)) luma_tile = luma_tile[2:h-2, 2:w-2, :, :] luma_box = as_strided(luma_img, strides=(sy, sx, sy, sx), shape=(h-4, w-4, 5, 5)) del luma_img diff = luma_box - luma_tile del luma_tile, luma_box diff = diff * diff weight = np.exp(-coef * diff / sigma_tile) del diff, sigma_tile weight_sum = weight.sum(axis=(2, 3)) sy, sx, sz, sw = weight.strides weight_extend = as_strided(weight, strides=(sy, sx, 0, sz, sw), shape=(h-4, w-4, 3, 5, 5)) del weight sy, sx = weight_sum.strides weight_sum_extend = as_strided(weight_sum, strides=(sy, sx, 0), shape=(h-4, w-4, 3)) del weight_sum sy, sx, sz = rgb_array.strides img_boxes = as_strided(rgb_array, strides=(sy, sx, sz, sy, sx), shape=(h-4, w-4, 3, 5, 5)) img_flt = (weight_extend * img_boxes).sum(axis=(3, 4)) / weight_sum_extend return img_flt
6178429e237a56081696696c4d35f9fea5459065
6,283
import json

def drop_entity(p_json: json):
    """
    Deletes an entity
    :param p_json: json specifying the id of the entity to be deleted
    """
    try:
        l_model = Model(p_json=p_json)
        l_model.drop_entity()
        return _JsonOutput(p_json_object=None, p_message="Entity has been dropped successfully").body
    except Exception as e:
        return _JsonOutput(p_json_object=None, p_error=e.args[0]).body
1bec1f8f42d6aea39e25078383b018c2a651e5e5
6,284
from typing import Optional from typing import Sequence def get_ssl_vpn_client_certs(ids: Optional[Sequence[str]] = None, name_regex: Optional[str] = None, output_file: Optional[str] = None, ssl_vpn_server_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSslVpnClientCertsResult: """ The SSL-VPN client certificates data source lists lots of SSL-VPN client certificates resource information owned by an Alicloud account. ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud foo = alicloud.vpc.get_ssl_vpn_client_certs(ids=["fake-cert-id"], name_regex="^foo", output_file="/tmp/clientcert", ssl_vpn_server_id="fake-server-id") ``` :param Sequence[str] ids: IDs of the SSL-VPN client certificates. :param str name_regex: A regex string of SSL-VPN client certificate name. :param str output_file: Save the result to the file. :param str ssl_vpn_server_id: Use the SSL-VPN server ID as the search key. """ __args__ = dict() __args__['ids'] = ids __args__['nameRegex'] = name_regex __args__['outputFile'] = output_file __args__['sslVpnServerId'] = ssl_vpn_server_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:vpc/getSslVpnClientCerts:getSslVpnClientCerts', __args__, opts=opts, typ=GetSslVpnClientCertsResult).value return AwaitableGetSslVpnClientCertsResult( certs=__ret__.certs, id=__ret__.id, ids=__ret__.ids, name_regex=__ret__.name_regex, names=__ret__.names, output_file=__ret__.output_file, ssl_vpn_server_id=__ret__.ssl_vpn_server_id)
3d3bb6664aff7468c684a6e2e16a887fbbc3f425
6,285
from typing import Optional def quantile(h: Distogram, value: float) -> Optional[float]: """ Returns a quantile of the distribution Args: h: A Distogram object. value: The quantile to compute. Must be between 0 and 1 Returns: An estimation of the quantile. Returns None if the Distogram object contains no element or value is outside of (0:1). """ if len(h.bins) == 0: return None if not (0 <= value <= 1): return None total_count = count(h) q_count = int(total_count * value) v0, f0 = h.bins[0] vl, fl = h.bins[-1] if q_count <= (f0 / 2): # left values fraction = q_count / (f0 / 2) result = h.min + (fraction * (v0 - h.min)) elif q_count >= (total_count - (fl / 2)): # right values base = q_count - (total_count - (fl / 2)) fraction = base / (fl / 2) result = vl + (fraction * (h.max - vl)) else: mb = q_count - f0 / 2 mids = [(fi + fj) / 2 for (_, fi), (_, fj) in zip(h.bins[:-1], h.bins[1:])] i, _ = next(filter(lambda i_f: mb < i_f[1], enumerate(accumulate(mids)))) (vi, _), (vj, _) = h.bins[i], h.bins[i + 1] fraction = (mb - sum(mids[:i])) / mids[i] result = vi + (fraction * (vj - vi)) return result
76f2a9b33d2e3e6a4a419a9f32cff591e191a145
6,286
def arachni_del_vuln(request):
    """
    Deletes the Arachni vulnerability.
    :param request:
    :return:
    """
    if request.method == 'POST':
        vuln_id = request.POST.get("del_vuln")
        un_scanid = request.POST.get("scan_id")
        scan_item = str(vuln_id)
        value = scan_item.replace(" ", "")
        value_split = value.split(',')
        split_length = len(value_split)
        print("split_length", split_length)
        for i in range(0, split_length):
            vuln_id = value_split[i]
            delete_vuln = arachni_scan_result_db.objects.filter(vuln_id=vuln_id)
            delete_vuln.delete()
        arachni_all_vul = arachni_scan_result_db.objects.filter(scan_id=un_scanid).values(
            'name', 'severity', 'vuln_color'
        ).distinct()
        total_vul = len(arachni_all_vul)
        total_high = len(arachni_all_vul.filter(severity="high"))
        total_medium = len(arachni_all_vul.filter(severity="medium"))
        total_low = len(arachni_all_vul.filter(severity="low"))
        arachni_scan_db.objects.filter(scan_id=un_scanid).update(
            total_vul=total_vul,
            high_vul=total_high,
            medium_vul=total_medium,
            low_vul=total_low
        )
        messages.success(request, "Deleted vulnerability")
        return HttpResponseRedirect("/webscanners/arachni_list_vuln?scan_id=%s" % un_scanid)
3be794525025fec019a5f76e0d885519077bc72a
6,287
def construct_imports(variables, imports):
    """Construct the list of imports by expanding all command line arguments."""
    result = {}
    for i in imports:
        kv = i.split('=', 1)
        if len(kv) != 2:
            print('Invalid value for --imports: %s. See --help.' % i)
            sys.exit(1)
        result[kv[0]] = expand_template(kv[1], variables, result)
    return result
2e26b3496dff96fa713e2388af415cadf831d032
6,289
import re

def is_regex(param):
    """
    Determine whether the parameter is a valid regular expression string
    :param param: {String} the parameter to check
    :return: {Boolean} whether it is a valid regular expression
    """
    try:
        re.compile(param)
        return True
    except re.error:
        return False
6a3ee33e68e33d3557db546beadc005235360080
6,290
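A quick usage sketch for the is_regex snippet above (illustrative only, not part of the dataset row):
>>> is_regex("[a-z]+")
True
>>> is_regex("(")
False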
import numpy as np
import xarray as xr

def NetCDF_SHP_lat_lon(name_of_nc, box_values, name_of_lat_var, name_of_lon_var, correct_360):
    """
    @ author: Shervan Gharari
    @ Github: https://github.com/ShervanGharari/candex
    @ author's email id: [email protected]
    @license: Apache2

    This function creates the shapefile associated with a NetCDF file, limited to the
    coordinates of a given box. If correct_360 is True, the code converts lon values
    greater than 180 to negative lon values.

    Arguments
    ---------
    name_of_nc: string, the name of the nc file
    box_values: the box to limit to a specific domain
    name_of_lat_var: string, the name of the variable lat
    name_of_lon_var: string, the name of the variable lon
    correct_360: logical, True or False

    Returns
    -------
    result: a shapefile for the NetCDF file
    """
    # open the nc file to read
    dataset = xr.open_dataset(name_of_nc, decode_times=False)
    # reading the lat and lon and converting them to np.array
    lat = dataset[name_of_lat_var].data
    lon = dataset[name_of_lon_var].data
    lat = np.array(lat)
    lon = np.array(lon)
    # check if lat and lon are 1D; if yes, they should be converted to 2D lat and lon
    # WARNING only for case 1 and 2
    if len(lat.shape) == 1 and len(lon.shape) == 1:
        lat, lon = lat_lon_2D(lat, lon)
    # creating the shapefile
    result = lat_lon_SHP(lat, lon, box_values, correct_360)
    return result
dc214f4449193f0daef0327df596c3109837a16e
6,291
from datetime import datetime import logging def format_issues( input_issues: list, developer_ids: list, start_date: datetime.datetime, end_date: datetime.datetime, end_date_buffer: int = 0, ) -> list: """extract and formats key fields into an output list Args: input_issues: issues (tuples) from GitHub developer_ids: GitHub id strings to filter start_date: start date of report end_date: similar, passed in for testing end_date_buffer: number of days to add to 'end time' Returns: list issues_summary: list of tuples with select, reformatted fields """ logging.info("beginning format issues") issues_summary = [] len(input_issues) for issue in input_issues: logging.info(f"formatting issue #: {issue.number}") # determine branch based on common PR naming pattern with [X.Y] branch prefix if "[main]" in issue.title or "[3." not in issue.title: branch_name = "[main]" else: branch_name = str(issue.title).split(" ", 2)[0] match issue.state: case "open": # issues we authored if ( issue.user.login in developer_ids and check_if_issue_date_interesting( issue.updated_at, start_date, end_date, end_date_buffer ) ): issues_summary.append( tuple( ( f"{issue.updated_at}", "Issue", "opened", f"{branch_name.rjust(6)}", f"{issue.url}", f"{issue.title}", ) ) ) # issues we closed case "closed": if issue.closed_by.login in developer_ids: issues_summary.append( tuple( ( f"{issue.closed_at}", "Issue", "closed", f"{branch_name.rjust(6)}", f"{issue.url}", f"{issue.title}", ) ) ) # END match # END for issue in input_issues return issues_summary
98af172b329c8887666d2ba430ad6e3bda00fe3d
6,292
import torch

def train_transforms(image_size, train_img_scale=(0.35, 1), normalize: bool = True,
                     mean=torch.tensor([0.485, 0.456, 0.406]),
                     std=torch.tensor([0.229, 0.224, 0.225])):
    """Transforms for train augmentation with Kornia."""
    transforms = [
        AccimageImageToTensorNN(),
        RandomResizedCrop((image_size, image_size), train_img_scale, keepdim=True),
        RandomHorizontalFlip(keepdim=True)]
    if normalize:
        # pass the mean (not the std) as the mean of the normalization
        transforms.append(Normalize(mean=mean, std=std, keepdim=True))
    return torch.nn.Sequential(*transforms)
957aaf01edf64589d5bd846cac9895077ba43fd0
6,293
def get_spans_bio(tags, id2label=None):
    """Gets entities from sequence.
    Args:
        tags (list): sequence of labels.
    Returns:
        list: list of (chunk_type, chunk_start, chunk_end).
    Example:
        >>> tags = ['B-PER', 'I-PER', 'O', 'B-LOC']
        >>> get_spans_bio(tags)
        # output [['PER', 0,1], ['LOC', 3, 3]]
    """
    chunks = []
    chunk = [-1, -1, -1]
    for indx, tag in enumerate(tags):
        if not isinstance(tag, str):
            tag = id2label[tag]
        if tag.startswith("B-"):
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[0] = tag.split('-')[1]
            # treat a lone B- tag as a single-token span, as in the docstring example
            chunk[2] = indx
            if indx == len(tags) - 1:
                chunks.append(chunk)
        elif tag.startswith('I-') and chunk[1] != -1:
            _type = tag.split('-')[1]
            if _type == chunk[0]:
                chunk[2] = indx
            if indx == len(tags) - 1:
                chunks.append(chunk)
        else:
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
    return chunks
9a9e45eedaf7c8700b72af9649cf80b13e276fc8
6,294
def min_count1(lst):
    """
    Get minimal value of list, version 1
    :param lst: Numbers list
    :return: Minimal value and its count on the list
    """
    if len(lst) == 0:
        return []
    count = 0
    min_value = lst[0]
    for num in lst:
        if num == min_value:
            count += 1
        elif num < min_value:
            count = 1
            min_value = num
    return [min_value, count]
b441d0a37534909e9a990b91a953d4022698c04b
6,295
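A brief usage sketch for the min_count1 snippet above (illustrative, not part of the dataset row):
>>> min_count1([3, 1, 4, 1, 5])
[1, 2]
>>> min_count1([])
[]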
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three available
    toppings on their hot dog.
    """
    return int(ketchup) + int(mustard) + int(onion) == 1
214c95d35c116993dc78740d5d16b874122960ed
6,297
def strip_line_endings(data: list) -> list:
    """Removes line endings (\n). Removes an item if it only contains \n."""
    return [i.rstrip("\n") for i in data if i != "\n"]
5383b1bc3884395459ca63b6f15c0a1091eaaaf0
6,298
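An illustrative usage sketch for the strip_line_endings snippet above (not part of the dataset row):
>>> strip_line_endings(["alpha\n", "\n", "beta\n"])
['alpha', 'beta']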
def calculate_afqt_scores(df): """This function calculates the AFQT scores. See information at https://www.nlsinfo.org/content/cohorts/nlsy79/topical-guide/education/aptitude-achievement-intelligence-scores for more details. In addition, we adjust the Numerical Operations score along the lines described in NLS Attachment 106. """ df["NUMERICAL_ADJ"] = df["ASVAB_NUMERICAL_OPERATIONS"] adjust_no = { 0: 0, 1: 0, 2: 1, 3: 2, 7: 8, 8: 9, 9: 10, 10: 11, 11: 12, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 21, 19: 22, 20: 23, 21: 24, 22: 25, 23: 26, 24: 27, 25: 28, 26: 29, 27: 30, 28: 31, 29: 33, 30: 34, 31: 35, 32: 36, 33: 37, 34: 38, 35: 39, 36: 39, 37: 40, 38: 41, 39: 42, 40: 43, 41: 44, 42: 45, 43: 46, 44: 47, 45: 48, 46: 49, 47: 49, 48: 50, 49: 50, 50: 50, } df["NUMERICAL_ADJ"].replace(adjust_no, inplace=True) df["AFQT_RAW"] = 0.00 df["AFQT_RAW"] += df["ASVAB_ARITHMETIC_REASONING"] df["AFQT_RAW"] += df["ASVAB_WORD_KNOWLEDGE"] df["AFQT_RAW"] += df["ASVAB_PARAGRAPH_COMPREHENSION"] df["AFQT_RAW"] += 0.5 * df["NUMERICAL_ADJ"] del df["NUMERICAL_ADJ"] # There are a couple of variables for which we can compute AFQT_RAW while there is no AFQT_1 # available. The variable AFQT_1 is set to NAN by the NLSY team if the test procedure was # altered, i.e. variable R06148 (ASVAB_ALTERED_TESTING) takes value 67. However, we noticed # that there are other indicators of problems as well. # # PROFILES, ASVAB VOCATIONAL TEST - NORMAL/ALTERED TESTING # # 11625 51 COMPLETED # 41 52 COMP-CONVERTED REFUSAL # 127 53 COMP-PROBLEM REPORTED # 85 54 COMP-SPANISH INSTR. CARDS # 36 67 COMP-PRODECURES ALTERED # # We followed up with the NLSY staff to get some guidance on how to deal with 51, 52, 53, # 54. The correspondence is available in ``correspondence-altered-testing.pdf'' in the sources # subdirectory. In a nutshell, not detailed information is available anymore on the meaning # of the different realizations. We decided to follow the original decision of the NLSY staff # to only set 67 to NAN. cond = df["ASVAB_ALTERED_TESTING"].isin([67]) df.loc[cond, "AFQT_RAW"] = np.nan # We have a little unit test, where we reconstruct the AFQT_1 variable from the inputs. assert_equal(_test_afqt(df), True) return df
ba6573e40115d766b2c0aebb78a3beb2881fbb4c
6,299
def weighting_system_z():
    """Z-weighting filter represented as polynomial transfer function.

    :returns: Tuple of `num` and `den`.

    Z-weighting is 0.0 dB for all frequencies and therefore corresponds
    to a multiplication of 1.
    """
    numerator = [1]
    denominator = [1]
    return numerator, denominator
8d84c572631c23f50f8a57e388e21fa62e316930
6,300
def shutdown():
    """
    Shuts down the API (since there is no legit way to kill the thread)
    Pulled from https://stackoverflow.com/questions/15562446/how-to-stop-flask-application-without-using-ctrl-c
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
    return 'Server shutting down...', 200
a5c1a226fac7c912c11415abb08200cbe2e6f1e3
6,301
def on_post_request():
    """This function triggers on every POST request to the chosen endpoint"""
    data_sent = request.data.decode('utf-8')
    return Response(return_animal_noise(data_sent), mimetype='text/plain')
c43343c697bfde9751dc4bb36b7ad162e7578049
6,302
def settings(comid=None, community=None):
    """Modify a community."""
    pending_records = \
        len(CommunityRecordsCollection(community).filter({'status': 'P'}))
    return render_template(
        'invenio_communities/settings.html',
        community=community,
        comid=comid,
        pending_records=pending_records)
702b41348b461876ebaae49187a3543dbdcd7d0d
6,303
import math def product_of_basins(): """Return the product of the sizes of the three largest basins.""" max_x = len(values[0]) - 1 max_y = len(values) - 1 def heightmap(x, y): """Return the height value in (xth column, yth row).""" return values[y][x] def is_lowpoint(x, y): """Return True if (x, y) is a lowpoint, else False.""" value = heightmap(x, y) return all((x == 0 or value < heightmap(x - 1, y), # left x == max_x or value < heightmap(x + 1, y), # right y == 0 or value < heightmap(x, y - 1), # up y == max_y or value < heightmap(x, y + 1))) # down def basin_size(x, y): """Return the basin size of the low point (x, y).""" if (x, y) in visited or heightmap(x, y) == 9: return 0 visited.add((x, y)) value = heightmap(x, y) size = 1 if x > 0 and value <= heightmap(x - 1, y): # left size += basin_size(x - 1, y) if x < max_x and value <= heightmap(x + 1, y): # right size += basin_size(x + 1, y) if y > 0 and value <= heightmap(x, y - 1): # up size += basin_size(x, y - 1) if y < max_y and value <= heightmap(x, y + 1): # down size += basin_size(x, y + 1) return size visited = set() basin_sizes = [] lowpoints = ((x, y) for x in range(max_x + 1) for y in range(max_y + 1) if is_lowpoint(x, y)) for x, y in lowpoints: basin_sizes.append(basin_size(x, y)) basin_sizes.sort(reverse=True) return math.prod(basin_sizes[:3])
3624faa5b5d1e991c31f2f0c5f790c68619a0b85
6,304
def singularity26(function):
    """Decorator to set the global singularity version"""
    def wrapper(*args, **kwargs):
        hpccm.config.g_ctype = container_type.SINGULARITY
        hpccm.config.g_singularity_version = StrictVersion('2.6')
        return function(*args, **kwargs)
    return wrapper
a6cdd7f7a8b000a63fa459a38ef6dd3fa0eec037
6,305
def denormalize(series, last_value):
    """Denormalize the values for a given series.

    This uses the last value available (i.e. the last closing price of the week
    before our prediction) as a reference for scaling the predicted results.
    """
    result = last_value * (series + 1)
    return result
f4c32aa4248378482f1294c54e706e6ee8d5332d
6,306
import warnings def tfidf( s: pd.Series, max_features=None, min_df=1, max_df=1.0, return_feature_names=False ) -> pd.Series.sparse: """ Represent a text-based Pandas Series using TF-IDF. *Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to calculate the _relative importance_ of the words in a document, taking into account the words' occurences in other documents. It consists of two parts: The *term frequency (tf)* tells us how frequently a term is present in a document, so tf(document d, term t) = number of times t appears in d. The *inverse document frequency (idf)* measures how _important_ or _characteristic_ a term is among the whole corpus (i.e. among all documents). Thus, idf(term t) = log((1 + number of documents) / (1 + number of documents where t is present)) + 1. Finally, tf-idf(document d, term t) = tf(d, t) * idf(t). Different from the `sklearn-implementation of tfidf <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`, this function does *not* normalize the output in any way, so the result is exactly what you get applying the formula described above. The input Series should already be tokenized. If not, it will be tokenized before tfidf is calculated. If working with big pandas Series, you might want to limit the number of features through the max_features parameter. Parameters ---------- s : Pandas Series (tokenized) max_features : int, optional, default to None. If not None, only the max_features most frequent tokens are used. min_df : int, optional, default to 1. When building the vocabulary, ignore terms that have a document frequency (number of documents a term appears in) strictly lower than the given threshold. max_df : int or double, optional, default to 1.0 When building the vocabulary, ignore terms that have a document frequency (number of documents a term appears in) strictly higher than the given threshold. This arguments basically permits to remove corpus-specific stop words. When the argument is a float [0.0, 1.0], the parameter represents a proportion of documents. return_feature_names: Boolean, optional, default to False Whether to return the feature (i.e. word) names with the output. Examples -------- >>> import texthero as hero >>> import pandas as pd >>> s = pd.Series(["Hi Bye", "Test Bye Bye"]) >>> s = hero.tokenize(s) >>> hero.tfidf(s, return_feature_names=True) (document 0 [1.0, 1.4054651081081644, 0.0] 1 [2.0, 0.0, 1.4054651081081644] dtype: object, ['Bye', 'Hi', 'Test']) """ # Check if input is tokenized. Else, print warning and tokenize. if not isinstance(s.iloc[0], list): warnings.warn(_not_tokenized_warning_message, DeprecationWarning) s = preprocessing.tokenize(s) tfidf = TfidfVectorizer( use_idf=True, max_features=max_features, min_df=min_df, max_df=max_df, tokenizer=lambda x: x, preprocessor=lambda x: x, norm=None, # Disable l1/l2 normalization. ) tfidf_vectors_csr = tfidf.fit_transform(s) # Result from sklearn is in Compressed Sparse Row format. # Pandas Sparse Series can only be initialized from Coordinate format. tfidf_vectors_coo = coo_matrix(tfidf_vectors_csr) s_out = pd.Series.sparse.from_coo(tfidf_vectors_coo) # Map word index to word name and keep original index of documents. feature_names = tfidf.get_feature_names() s_out.index = s_out.index.map(lambda x: (s.index[x[0]], feature_names[x[1]])) s_out.rename_axis(["document", "word"], inplace=True) # NOTE: Currently: still convert to flat series instead of representation series. 
# Will change to return representation series directly in Version 2. s_out = representation_series_to_flat_series( s_out, fill_missing_with=0.0, index=s.index ) if return_feature_names: return s_out, feature_names else: return s_out
56f07d62254b873fc5581af26160d7bf4fc5d7e6
6,307
from typing import Tuple from typing import Any from typing import Dict import time from typing import cast def decorator(fn: AsyncFn, *, expire: int, maxsize: int) -> AsyncFn: """Cache decorator.""" cache = LRUCache(maxsize=maxsize) @wraps(fn) async def wrapper(*args: Tuple[Any, ...], **kwds: Dict[str, Any]) -> Any: """Wrap the original async `fn`. Cached results will be returned if cache hit, otherwise (missing/expired) `fn` will be invoked and its result will be cached. Args: args: Positional arguments in function parameters. kwds: Keyword arguments in function parameters. Returns: The (maybe cached) result of `fn(*args, **kwds)`. """ key = CacheKey.make(args, kwds) value = cache[key] # cache miss/expired if value is None: result = await fn(*args, **kwds) cache[key] = CacheValue(expired=time.monotonic() + expire, data=result) return result return value.data wrapper.__dict__["cache"] = cache wrapper.__dict__["expire"] = expire return cast(AsyncFn, wrapper)
07bcb2181787f5af00098b9de27186a3e6aa1cfa
6,308
def taxon_id(_):
    """
    Always returns 10090, the mouse taxon id.
    """
    return 10090
117fe7f8d56eb9be4ee2b0f4d782b806576faedf
6,311
def rthread_if(data, *forms):
    """
    Similar to rthread, but each form must be a tuple with (test, fn, ...args)
    and the argument is only passed to fn if the boolean test is True.

    If test is callable, the current value is passed to the callable to decide
    if fn must be executed or not.

    Like rthread, arguments are passed as tuples and the value is passed as
    the last argument.

    Examples:
        >>> sk.rthread_if(20, (True, op.div, 2), (False, op.mul, 4), (sk.is_even, op.add, 2))
        0.1

    See Also:
        :func:`thread`
        :func:`rthread_if`
    """
    for form in forms:
        do_it, func, *args = form
        if callable(do_it):
            do_it = do_it(data)
        if do_it:
            try:
                data = func(*args, data)
            except Exception as ex:
                raise _thread_error(ex, func, (*args, data)) from ex
    return data
a9afa8576ec3a308d7f1514933e462d4565cf738
6,312
import numpy as np

def decompose_matrices(Ks):
    """
    Apply Cholesky decomposition to each matrix in the given list
    :param Ks: a list of matrices
    """
    Ls = []
    for K_d in Ks:
        Ls.append(np.linalg.cholesky(K_d))
    return Ls
6a14363eab0646f59d3664843ef47c5ad34c5537
6,313
def ndcg_score(y_pre, y_true, k=20):
    """
    get NDCG@k
    :param y_pre: numpy (batch_size, x)
    :param y_true: list[batch_size][ground_truth_num]
    :param k: k
    :return: NDCG@k
    """
    dcg = dcg_score(y_pre, y_true, k)
    idcg = dcg_score(y_true, y_true, k)
    return dcg / idcg
0e9e513f4c8e7ceba18c0a12d0c84e08d210ddcd
6,315
from typing import Optional

import numpy as np
import numpy.typing as npt

def get_discount_weights(
    discount_factor: float, traj_len: int, num_trajs: int = 1
) -> Optional[npt.NDArray[np.float32]]:
    """
    Return the trajectory discount weight array if applicable
    :param discount_factor: the discount factor by which the displacements
        corresponding to the k^th timestep will be discounted
    :param traj_len: len of traj
    :param optional num_trajs: num of ego trajs, default is set to 1, but it's
        generalized in case we need to compare multiple ego trajs with expert
    :return: array of discount_weights.
    """
    discount_weights = None
    if discount_factor != 1.0:
        # Compute discount factors
        pow_arr = np.tile(np.arange(traj_len), (num_trajs, 1))  # type: ignore
        discount_weights = np.power(discount_factor, pow_arr)
    return discount_weights
0d09fcf8228b1e04790d7874e0ecfffeae9a009a
6,316
import random

def touch_to_square(touch_x, touch_y, num_rows, num_cols):
    """ Given a touch x and y, convert it to a coordinate on the square. """
    x = clamp(maprange((PAD_Y_RANGE_MAX, PAD_Y_RANGE_MIN), (0, num_rows), touch_y)
              + random.randrange(-1, 2), 0, num_rows - 1)
    y = clamp(maprange((PAD_X_RANGE_MAX, PAD_X_RANGE_MIN), (0, num_cols), touch_x)
              + random.randrange(-1, 2), 0, num_cols - 1)
    return (int(x), int(y))
f7320e7e9738f7e05b3e675c706b28182a12de9a
6,317
from urllib.parse import urlparse

def is_valid_scheme(url):
    """Judge whether url has a valid scheme."""
    return urlparse(url).scheme in ["ftp", "gopher", "http", "https"]
4240ec4251e8f937c6f755d123b0b52f88057420
6,318
def height_to_transmission(height, material, energy, rho=0, photo_only=False, source='nist'): """ Calculates the resulting x-ray transmission of an object based on the given height (thickness) and for a given material and energy. Parameters ========== height: grating height (thickness) [um] material: chemical formula ('Fe2O3', 'CaMg(CO3)2', 'La1.9Sr0.1CuO4') energy: x-ray energy [keV] rho: density in [g/cm3], default=0 (no density given) photo_only: boolean for returning photo cross-section component only, default=False source: material params LUT... default='nist' Returns ======= transmission: percentage of resulting x-ray transmission """ return 1 - height_to_absorption(height, material, energy, rho, photo_only, source)
54e2933b06e0489fdc521c4f173d516038f32ee8
6,319
def assignModelClusters(keyframe_model, colors): """ Map each colorspace segment to the closest color in the input. Parameters ---------- keyframe_model : FrameScorer colors : numpy array of int, shape (num_colors, 3) """ hsv_mean_img = keyframe_model.hsv_means.copy().reshape(1, keyframe_model.n_clusters, 3) hsv_mean_img_saturated = hsv_mean_img.copy() hsv_mean_img_saturated[:, :, 1] = 1 hsv_mean_img_saturated[:, :, 2] = 1 rgb_mean_img_saturated = imageprocessing.color.hsv2rgb(hsv_mean_img_saturated) # rgb_mean_img = imageprocessing.color.hsv2rgb(hsv_mean_img) # imageprocessing.displayImage(rgb_mean_img) # imageprocessing.displayImage(rgb_mean_img_saturated) rgb_means_saturated = rgb_mean_img_saturated.reshape(keyframe_model.n_clusters, 3) distances = np.array(tuple( np.linalg.norm(rgb_means_saturated - np.array(rgb_color), axis=1) for rgb_color in colors )).T best_idxs = distances.argmin(axis=1) keyframe_model.color_mappings = best_idxs return keyframe_model
d32c093c8931272215bec12d08b4b268da50f184
6,320
def calc_ef_from_bases(x,*args): """ Calculate energies and forces of every samples using bases data. """ global _hl1,_ergs,_frcs,_wgt1,_wgt2,_wgt3,_aml,_bml #.....initialize variables if _nl == 1: _wgt1,_wgt2= vars2wgts(x) elif _nl == 2: _wgt1,_wgt2,_wgt3= vars2wgts(x) es=np.zeros(len(_samples)) fs= [] for smpl in _samples: fs.append(np.zeros((smpl.natm,3))) p= mp.Pool(_nprcs) _hl1= [] _aml= [] _bml= [] if _nprcs == 1: for ismpl in range(len(_samples)): smpl= _samples[ismpl] if _nl == 1: est,fst,hl1s,ams,bms= calc_ef1(ismpl,x,*args) _hl1.append(hl1s) _aml.append(ams) _bml.append(bms) elif _nl == 2: est,fst,hl1s,hl2s,ams,bms,cms= calc_ef2(ismpl,x,*args) _hl1.append(hl1s) _hl2.append(hl2s) _aml.append(ams) _bml.append(bms) _cml.append(cms) es[ismpl]= est for ia in range(smpl.natm): fs[ismpl][ia,0] += fst[ia,0] fs[ismpl][ia,1] += fst[ia,1] fs[ismpl][ia,2] += fst[ia,2] else: func_args=[] if _nl == 1: for ismpl in range(len(_samples)): func_args.append( (calc_ef1,ismpl,x) ) elif _nl == 2: for ismpl in range(len(_samples)): func_args.append( (calc_ef2,ismpl,x) ) results= p.map(arg_wrapper,func_args) p.close() p.join() for ismpl in range(len(_samples)): smpl= _samples[ismpl] if _nl == 1: est,fst,hl1s,ams,bms= results[ismpl] _hl1.append(hl1s) _aml.append(ams) _bml.append(bms) elif _nl == 2: est,fst,hl1s,hl2s,ams,bms,cms= results[ismpl] _hl1.append(hl1s) _hl2.append(hl2s) _aml.append(ams) _bml.append(bms) _cml.append(cms) es[ismpl]= est for ia in range(smpl.natm): fs[ismpl][ia,0] += fst[ia,0] fs[ismpl][ia,1] += fst[ia,1] fs[ismpl][ia,2] += fst[ia,2] # print ' es:' # print es _ergs= es _frcs= fs return (es,fs)
d54c285c04dd8ae948fb13553251f0675c1af4bc
6,322
def HIadj_post_anthesis( NewCond_DelayedCDs, NewCond_sCor1, NewCond_sCor2, NewCond_DAP, NewCond_Fpre, NewCond_CC, NewCond_fpost_upp, NewCond_fpost_dwn, Crop, Ksw): """ Function to calculate adjustment to harvest index for post-anthesis water stress <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126) *Arguments:* `InitCond`: `InitCondClass` : InitCond object containing model paramaters `Crop`: `CropClass` : Crop object containing Crop paramaters `Ksw`: `KswClass` : Ksw object containing water stress paramaters *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters """ ## Store initial conditions in a structure for updating ## # NewCond = InitCond InitCond_DelayedCDs = NewCond_DelayedCDs*1 InitCond_sCor1 = NewCond_sCor1*1 InitCond_sCor2 = NewCond_sCor2*1 ## Calculate harvest index adjustment ## # 1. Adjustment for leaf expansion tmax1 = Crop.CanopyDevEndCD - Crop.HIstartCD DAP = NewCond_DAP - InitCond_DelayedCDs if ( (DAP <= (Crop.CanopyDevEndCD + 1)) and (tmax1 > 0) and (NewCond_Fpre > 0.99) and (NewCond_CC > 0.001) and (Crop.a_HI > 0) ): dCor = 1 + (1 - Ksw.Exp) / Crop.a_HI NewCond_sCor1 = InitCond_sCor1 + (dCor / tmax1) DayCor = DAP - 1 - Crop.HIstartCD NewCond_fpost_upp = (tmax1 / DayCor) * NewCond_sCor1 # 2. Adjustment for stomatal closure tmax2 = Crop.YldFormCD DAP = NewCond_DAP - InitCond_DelayedCDs if ( (DAP <= (Crop.HIendCD + 1)) and (tmax2 > 0) and (NewCond_Fpre > 0.99) and (NewCond_CC > 0.001) and (Crop.b_HI > 0) ): # print(Ksw.Sto) dCor = np.power(Ksw.Sto, 0.1) * (1 - (1 - Ksw.Sto) / Crop.b_HI) NewCond_sCor2 = InitCond_sCor2 + (dCor / tmax2) DayCor = DAP - 1 - Crop.HIstartCD NewCond_fpost_dwn = (tmax2 / DayCor) * NewCond_sCor2 # Determine total multiplier if (tmax1 == 0) and (tmax2 == 0): NewCond_Fpost = 1 else: if tmax2 == 0: NewCond_Fpost = NewCond_fpost_upp else: if tmax1 == 0: NewCond_Fpost = NewCond_fpost_dwn elif tmax1 <= tmax2: NewCond_Fpost = NewCond_fpost_dwn * ( ((tmax1 * NewCond_fpost_upp) + (tmax2 - tmax1)) / tmax2 ) else: NewCond_Fpost = NewCond_fpost_upp * ( ((tmax2 * NewCond_fpost_dwn) + (tmax1 - tmax2)) / tmax1 ) return ( NewCond_sCor1, NewCond_sCor2, NewCond_fpost_upp, NewCond_fpost_dwn, NewCond_Fpost)
9101dba12642dc7f1f4b0bbe9f35d7e926f6af0a
6,323
def packify(fmt=u'8', fields=[0x00], size=None, reverse=False): """ Packs fields sequence of bit fields into bytearray of size bytes using fmt string. Each white space separated field of fmt is the length of the associated bit field If not provided size is the least integer number of bytes that hold the fmt. If reverse is true reverse the order of the bytes in the byte array before returning. This is useful for converting between bigendian and littleendian. Assumes unsigned fields values. Assumes network big endian so first fields element is high order bits. Each field in format string is number of bits for the associated bit field Fields with length of 1 are treated as has having boolean truthy field values that is, nonzero is True and packs as a 1 for 2+ length bit fields the field element is truncated to the number of low order bits in the bit field if sum of number of bits in fmt less than size bytes then the last byte in the bytearray is right zero padded if sum of number of bits in fmt greater than size bytes returns exception to pad just use 0 value in source field. example packify("1 3 2 2", (True, 4, 0, 3)). returns bytearry([0xc3]) """ tbfl = sum((int(x) for x in fmt.split())) if size is None: size = (tbfl // 8) + 1 if tbfl % 8 else tbfl // 8 if not (0 <= tbfl <= (size * 8)): raise ValueError("Total bit field lengths in fmt not in [0, {0}]".format(size * 8)) n = 0 bfp = 8 * size # starting bit field position bu = 0 # bits used for i, bfmt in enumerate(fmt.split()): bits = 0x00 bfl = int(bfmt) bu += bfl if bfl == 1: if fields[i]: bits = 0x01 else: bits = 0x00 else: bits = fields[i] & (2**bfl - 1) # bit-and mask out high order bits bits <<= (bfp - bfl) #shift left to bit position less bit field size n |= bits # bit-or in bits bfp -= bfl #adjust bit field position for next element return bytify(n=n, size=size, reverse=reverse, strict=True)
882d4fd9e3ec626f499f7c4653f6c3864ad64095
6,324
def fix_conf_params(conf_obj, section_name): """from a ConfigParser object, return a dictionary of all parameters for a given section in the expected format. Because ConfigParser defaults to values under [DEFAULT] if present, these values should always appear unless the file is really bad. :param configparser_object: ConfigParser instance :param section_name: string of section name in config file (e.g. "MyBank" matches "[MyBank]" in file) :return: dict with all parameters """ config = { "input_columns": ["Input Columns", False, ","], "output_columns": ["Output Columns", False, ","], "input_filename": ["Source Filename Pattern", False, ""], "path": ["Source Path", False, ""], "ext": ["Source Filename Extension", False, ""], "regex": ["Use Regex For Filename", True, ""], "fixed_prefix": ["Output Filename Prefix", False, ""], "input_delimiter": ["Source CSV Delimiter", False, ""], "header_rows": ["Header Rows", False, ""], "footer_rows": ["Footer Rows", False, ""], "date_format": ["Date Format", False, ""], "delete_original": ["Delete Source File", True, ""], "cd_flags": ["Inflow or Outflow Indicator", False, ","], "payee_to_memo": ["Use Payee for Memo", True, ""], "plugin": ["Plugin", False, ""], "api_token": ["YNAB API Access Token", False, ""], "api_account": ["YNAB Account ID", False, "|"], } for key in config: config[key] = get_config_line(conf_obj, section_name, config[key]) config["bank_name"] = section_name # quick n' dirty fix for tabs as delimiters if config["input_delimiter"] == "\\t": config["input_delimiter"] = "\t" return config
55cdb572e2b45f437c583429ed9ee61f0de9b3de
6,325
def sackStringToSack(sackString):
    """
    C{sackString} is a C{str}. Returns a L{window.SACK}.
    """
    try:
        # If not enough args for split, Python raises ValueError
        joinedSackList, ackNumberStr = sackString.rsplit('|', 1)
        ackNumber = strToIntInRange(ackNumberStr, -1, 2**53)
        sackList = tuple(strToNonNegLimit(s, 2**53) for s in joinedSackList.split(',')) if joinedSackList else ()
    except ValueError:
        raise InvalidSackString("bad sack")
    return SACK(ackNumber, sackList)
9fd5ef91f6e897758f47de006a582b5b1ec99f82
6,326
def setup_graph(event, sta, chan, band, tm_shape, tm_type, wm_family, wm_type, phases, init_run_name, init_iteration, fit_hz=5, uatemplate_rate=1e-4, smoothing=0, dummy_fallback=False, raw_signals=False, init_templates=False, **kwargs): """ Set up the graph with the signal for a given training event. """ s = Sigvisa() cursor = s.dbconn.cursor() try: input_runid = get_fitting_runid(cursor, init_run_name, init_iteration, create_if_new = False) runids = (input_runid,) print "input_runid", input_runid except RunNotFoundException: runids = () sg = SigvisaGraph(template_model_type=tm_type, template_shape=tm_shape, wiggle_model_type=wm_type, wiggle_family=wm_family, phases=phases, runids = runids, uatemplate_rate=uatemplate_rate, min_mb=1.0, dummy_fallback=dummy_fallback, raw_signals=raw_signals, **kwargs) filter_str = band if not raw_signals: filter_str += ";env" wave = load_event_station_chan(event.evid, sta, chan, cursor=cursor, exclude_other_evs=True, phases=None if phases=="leb" else phases, pre_s=100.0).filter(filter_str) cursor.close() if smoothing > 0: wave = wave.filter('smooth_%d' % smoothing) if fit_hz != wave['srate']: wave = wave.filter('hz_%.2f' % fit_hz) if len(mask_blocks(wave.data.mask)) > 2: raise Exception("wave contains missing data") if (not raw_signals) and (np.sum(wave.data < 0.0001) > 10): raise Exception("wave contains regions of zeros") sg.add_wave(wave=wave, init_extra_noise=True) evnodes = sg.add_event(ev=event) eid = evnodes["lon"].eid stddevs = {"time": 2.0, "mb": 0.2} sg.observe_event(eid=eid, ev=event, stddevs=stddevs) if init_templates: fitid = get_previous_fitid(input_runid, event.evid, sta) set_templates_from_fitid(sg, 1, fitid, wave) #sg.fix_arrival_times() phases = sg.ev_arriving_phases(1, wave["sta"]) assert( "P" in phases or "Pg" in phases or "Pn" in phases or "pP" in phases) return sg
45add3585b61db404a9edb31fe7363677c6cbaec
6,327
def getLogisticModelNames(config):
    """
    Get the names of the models present in the configobj

    Args:
        config: configobj object defining the model and its inputs.

    Returns:
        list: list of model names.
    """
    names = []
    lmodel_space = config
    for key, value in lmodel_space.items():
        if isinstance(value, str):
            continue
        else:
            # this is a model
            names.append(key)
    return names
f7f82b12eb50a58c92970b5c2a8f99eb01945523
6,328
def checkfileCopyright(filename):
    """ return true if file already has a Copyright in its first X lines """
    with open(filename, 'r') as infile:
        for _ in range(6):
            line = infile.readline()
            if "Copyright" in line or "copyright" in line:
                return True
    return False
567b485a58e46796238a109de935904d747679c7
6,329
def TopicFormat(topic_name, topic_project=''):
    """Formats a topic name as a fully qualified topic path.

    Args:
        topic_name: (string) Name of the topic to convert.
        topic_project: (string) Name of the project the given topic belongs to.
            If not given, then the project defaults to the currently selected
            cloud project.

    Returns:
        Returns a fully qualified topic path of the form
        project/foo/topics/topic_name.
    """
    return TopicIdentifier(topic_name, topic_project).GetFullPath()
e8a3d28cc81b7a31a2243b68c77aef77449c1b97
6,330
def mp0(g0):
    """Return 0th order free energy."""
    return g0.sum()
5aa3580fec1322bd7b4e357ec6bee4d52fae592e
6,331
def create_diamond(color=None): """ Creates a diamond. :param color: Diamond color :type color: list :return: OpenGL list """ # noinspection PyArgumentEqualDefault a = Point3(-1.0, -1.0, 0.0) # noinspection PyArgumentEqualDefault b = Point3(1.0, -1.0, 0.0) # noinspection PyArgumentEqualDefault c = Point3(1.0, 1.0, 0.0) # noinspection PyArgumentEqualDefault d = Point3(-1.0, 1.0, 0.0) # noinspection PyArgumentEqualDefault e = Point3(0.0, 0.0, 1.0) # noinspection PyArgumentEqualDefault f = Point3(0.0, 0.0, -1.0) obj = _gl.glGenLists(1) _gl.glNewList(obj, _gl.GL_COMPILE) _gl.glPushMatrix() if color is not None: _gl.glColor4fv(color) _gl.glBegin(_gl.GL_TRIANGLES) draw_vertex_list_create_normal([a, b, e]) draw_vertex_list_create_normal([b, c, e]) draw_vertex_list_create_normal([c, d, e]) draw_vertex_list_create_normal([d, a, e]) draw_vertex_list_create_normal([b, a, f]) draw_vertex_list_create_normal([c, b, f]) draw_vertex_list_create_normal([d, c, f]) draw_vertex_list_create_normal([a, d, f]) _gl.glEnd() _gl.glPopMatrix() _gl.glEndList() return obj
421939be392abdba6ccedb8a946a93ebe35fb612
6,332
import copy
import collections.abc

def merge_dicts(*dicts):
    """
    Recursive dict merge. Instead of updating only top-level keys, dict_merge
    recurses down into dicts nested to an arbitrary depth, updating keys.
    """
    assert len(dicts) > 1
    dict_ = copy.deepcopy(dicts[0])
    for merge_dict in dicts[1:]:
        for k, v in merge_dict.items():
            if (k in dict_ and isinstance(dict_[k], dict)
                    and isinstance(merge_dict[k], collections.abc.Mapping)):
                dict_[k] = merge_dicts(dict_[k], merge_dict[k])
            else:
                dict_[k] = merge_dict[k]
    return dict_
6595343694b80928417c2a1f096cf4587f3dccbc
6,333
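A short usage sketch for the merge_dicts snippet above (illustrative, not part of the dataset row); note how nested keys are merged rather than overwritten:
>>> merge_dicts({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}, "b": 4})
{'a': {'x': 1, 'y': 3}, 'b': 4}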
def getAllFWImageIDs(fwInvDict):
    """
    gets a list of all the firmware image IDs

    @param fwInvDict: the dictionary to search for FW image IDs
    @return: list containing string representation of the found image ids
    """
    idList = []
    for key in fwInvDict:
        if 'Version' in fwInvDict[key]:
            idList.append(key.split('/')[-1])
    return idList
54bbd28b80905c7b48e5ddc3e61187f5b5ad5f6a
6,334
def document_uris_from_data(document_data, claimant): """ Return one or more document URI dicts for the given document data. Returns one document uri dict for each document equivalence claim in document_data. Each dict can be used to init a DocumentURI object directly:: document_uri = DocumentURI(**document_uri_dict) Always returns at least one "self-claim" document URI whose URI is the claimant URI itself. :param document_data: the "document" sub-object that was POSTed to the API as part of a new or updated annotation :type document_data: dict :param claimant: the URI that the browser was at when this annotation was created (the top-level "uri" field of the annotation) :type claimant: unicode :returns: a list of one or more document URI dicts :rtype: list of dicts """ document_uris = document_uris_from_links(document_data.get("link", []), claimant) document_uris.extend( document_uris_from_highwire_pdf(document_data.get("highwire", {}), claimant) ) document_uris.extend( document_uris_from_highwire_doi(document_data.get("highwire", {}), claimant) ) document_uris.extend(document_uris_from_dc(document_data.get("dc", {}), claimant)) document_uris.append(document_uri_self_claim(claimant)) for document_uri in document_uris: uri = document_uri["uri"] if uri: document_uri["uri"] = uri.strip() document_uris = [d for d in document_uris if d["uri"]] return document_uris
70f02c61f8cb1be21dd094c696f257e565d7c04c
6,336
def get_bond_angle_index(edge_index): """ edge_index: (2, E) bond_angle_index: (3, *) """ def _add_item( node_i_indices, node_j_indices, node_k_indices, node_i_index, node_j_index, node_k_index): node_i_indices += [node_i_index, node_k_index] node_j_indices += [node_j_index, node_j_index] node_k_indices += [node_k_index, node_i_index] E = edge_index.shape[1] node_i_indices = [] node_j_indices = [] node_k_indices = [] for edge_i in range(E - 1): for edge_j in range(edge_i + 1, E): a0, a1 = edge_index[:, edge_i] b0, b1 = edge_index[:, edge_j] if a0 == b0 and a1 == b1: continue if a0 == b1 and a1 == b0: continue if a0 == b0: _add_item(node_i_indices, node_j_indices, node_k_indices, a1, a0, b1) if a0 == b1: _add_item(node_i_indices, node_j_indices, node_k_indices, a1, a0, b0) if a1 == b0: _add_item(node_i_indices, node_j_indices, node_k_indices, a0, a1, b1) if a1 == b1: _add_item(node_i_indices, node_j_indices, node_k_indices, a0, a1, b0) node_ijk = np.array([node_i_indices, node_j_indices, node_k_indices]) uniq_node_ijk = np.unique(node_ijk, axis=1).astype('int64') # (3, *) return uniq_node_ijk
7660b6b27b2a028092d39cac5e1b9dfcf6973984
6,337
def get_config(section=None, option=None): """Return dpm configuration objects. :param section: the name of the section in the ini file, e.g. "index:ckan". - May be omitted only when no other parameters are provided - Must be omitted elsewhere :type section: str :param option: the name of the option to be retrieved from the section of the ini file, e.g. 'ckan.api_key' - Can be omitted if a section is provided - Must be omitted if no section is provided :type option: str :return: [str, str, .., str] -- The section names of the ini file, when no section and no option are provided -- e.g. ['dpm', 'index:ckan', 'index:db', 'upload:ckan'] [str, str, .., str] -- The option names of the ini file for a given section -- e.g.['ckan.url', 'ckan.api_key'] [str] -- The option value if a valid section and a valid option name are given. -- e.g. ['http://thedatahub.org/api/'] """ if not section and not option: return dpm.CONFIG.sections() elif section and not option: return dpm.CONFIG.options(section) elif section and option: return dpm.CONFIG.get(section, option) else: raise ValueError("Please provide no parameters OR just section OR both section and option")
e4910cd804593da8a6fd4b1fae7f3bd3fcd32f2b
6,338
def match_intervals(intervals_from, intervals_to, strict=True): """Match one set of time intervals to another. This can be useful for tasks such as mapping beat timings to segments. Each element ``[a, b]`` of ``intervals_from`` is matched to the element ``[c, d]`` of ``intervals_to`` which maximizes the Jaccard similarity between the intervals:: max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)| In ``strict=True`` mode, if there is no interval with positive intersection with ``[a,b]``, an exception is thrown. In ``strict=False`` mode, any interval ``[a, b]`` that has no intersection with any element of ``intervals_to`` is instead matched to the interval ``[c, d]`` which minimizes:: min(|b - c|, |a - d|) that is, the disjoint interval [c, d] with a boundary closest to [a, b]. .. note:: An element of ``intervals_to`` may be matched to multiple entries of ``intervals_from``. Parameters ---------- intervals_from : np.ndarray [shape=(n, 2)] The time range for source intervals. The ``i`` th interval spans time ``intervals_from[i, 0]`` to ``intervals_from[i, 1]``. ``intervals_from[0, 0]`` should be 0, ``intervals_from[-1, 1]`` should be the track duration. intervals_to : np.ndarray [shape=(m, 2)] Analogous to ``intervals_from``. strict : bool If ``True``, intervals can only match if they intersect. If ``False``, disjoint intervals can match. Returns ------- interval_mapping : np.ndarray [shape=(n,)] For each interval in ``intervals_from``, the corresponding interval in ``intervals_to``. See Also -------- match_events Raises ------ ParameterError If either array of input intervals is not the correct shape If ``strict=True`` and some element of ``intervals_from`` is disjoint from every element of ``intervals_to``. Examples -------- >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]]) >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]]) >>> librosa.util.match_intervals(ints_from, ints_to) array([2, 1, 2], dtype=uint32) >>> # [3, 5] => [4, 5] (ints_to[2]) >>> # [1, 4] => [1, 3] (ints_to[1]) >>> # [4, 5] => [4, 5] (ints_to[2]) The reverse matching of the above is not possible in ``strict`` mode because ``[6, 7]`` is disjoint from all intervals in ``ints_from``. With ``strict=False``, we get the following: >>> librosa.util.match_intervals(ints_to, ints_from, strict=False) array([1, 1, 2, 2], dtype=uint32) >>> # [0, 2] => [1, 4] (ints_from[1]) >>> # [1, 3] => [1, 4] (ints_from[1]) >>> # [4, 5] => [4, 5] (ints_from[2]) >>> # [6, 7] => [4, 5] (ints_from[2]) """ if len(intervals_from) == 0 or len(intervals_to) == 0: raise ParameterError("Attempting to match empty interval list") # Verify that the input intervals has correct shape and size valid_intervals(intervals_from) valid_intervals(intervals_to) try: return __match_intervals(intervals_from, intervals_to, strict=strict) except ParameterError as exc: raise ParameterError( "Unable to match intervals with strict={}".format(strict) ) from exc
a3b523b5aafd77a2fc1026c183ae6a690ec3538c
6,339
def correlate(x, y, margin, method='pearson'): """ Find delay and correlation between x and each column o y Parameters ---------- x : `pandas.Series` Main signal y : `pandas.DataFrame` Secondary signals method : `str`, optional Correlation method. Defaults to `pearson`. Options: `pearson`,`robust`,`kendall`,`spearman` Returns ------- `(List[float], List[int])` List of correlation coefficients and delays in samples in the same order as y's columns Notes ----- Uses the pandas method corrwith (which can return pearson, kendall or spearman coefficients) to correlate. If robust correlation is used, the mapping presented in [1]_ is used and then Pearson correlation is used. To speedup the lag finding, the delays are calculated in log intervals and then interpolated by splines, as shown in [2]_, and the lag with maximum correlation found in this interpolated function is then used as the delay. References ---------- .. [1] Raymaekers, J., Rousseeuw, P. "Fast Robust Correlation for High-Dimensional Data", Technometrics, vol. 63, Pages 184-198, 2021 .. [2] Sakurai, Yasushi & Papadimitriou, Spiros & Faloutsos, Christos. (2005). BRAID: Stream mining through group lag correlations. Proceedings of the ACM SIGMOD International Conference on Management of Data. 599-610. """ beg, end = (x.index.min(), x.index.max()) y = interpolate(y,x.index,margin) if(method == 'robust'): method='pearson' x = pd.Series(z(sig.detrend(x)), index=x.index, name=x.name) x = x.apply(g) y = y.apply(lambda s: z(sig.detrend(s))).applymap(g) N = int(x.size*margin) l = int(np.log2(N)) b = 4 log_lags = np.array([int(2**i+(j*2**i/b)) for i in range(2,l+1) for j in range(4) if 2**i+(j*2**i/b) < N]) log_lags = list(-1*log_lags)[::-1]+[-3,-2,-1,0,1,2,3]+list(log_lags) new_lags = list(range(-1*max(log_lags),max(log_lags)+1)) vals = pd.DataFrame([lagged_corr(x,y,lag,method) for lag in log_lags]) vals = vals.apply(lambda s: inter.make_interp_spline(log_lags, abs(s),k=3)(new_lags)) peaks = vals.apply(lambda s: pd.Series([new_lags[i] for i in sig.find_peaks(s)[0]]+[new_lags[max(range(len(s)), key=s.__getitem__)]]).drop_duplicates()) peak_corr = pd.DataFrame(np.array([[x.corr((y[col].shift(int(peak)))[beg:end], method=method) if not pd.isna(peak) else 0 for peak in peaks[col]] for col in peaks]).transpose(), columns=y.columns) dela = [peak_corr[col].abs().idxmax() for col in peak_corr] delays = [int(peaks[col].iloc[dela[pos]]) for pos, col in enumerate(peak_corr)] corrs = [round(peak_corr[col].iloc[dela[pos]],2) for pos, col in enumerate(peak_corr)] return corrs, delays
45800fd580ad257a8f4663c06577860f952a9a79
6,340
def sortList2(head: ListNode) -> ListNode: """down2up""" h, length, intv = head, 0, 1 while h: h, length = h.next, length + 1 res = ListNode(0) res.next = head # merge the list in different intv. while intv < length: pre, h = res, res.next while h: # get the two merge head `h1`, `h2` h1, i = h, intv while i and h: h, i = h.next, i - 1 if i: break # no need to merge because the `h2` is None. h2, i = h, intv while i and h: h, i = h.next, i - 1 c1, c2 = intv, intv - i # the `c2`: length of `h2` can be small than the `intv`. # merge the `h1` and `h2`. while c1 and c2: if h1.val < h2.val: pre.next, h1, c1 = h1, h1.next, c1 - 1 else: pre.next, h2, c2 = h2, h2.next, c2 - 1 pre = pre.next pre.next = h1 if c1 else h2 while c1 > 0 or c2 > 0: pre, c1, c2 = pre.next, c1 - 1, c2 - 1 pre.next = h intv *= 2 return res.next
02ffae2012847b952197f1ed4c2af2178a552b4d
6,341
def _inverse_frequency_max(searcher, fieldname, term):
    """ Inverse frequency smooth idf schema """
    n = searcher.doc_frequency(fieldname, term)
    maxweight = searcher.term_info(fieldname, term).max_weight()
    return log(1 + (maxweight / n), 10) if n != 0.0 else 0.0
e24497c2d67600b9744c5fafb7b503853c54d76c
6,342
def ha(data):
    """
    Hadamard Transform

    This function is very slow. Implement a Fast Walsh-Hadamard Transform
    with sequency/Walsh ordering (FWHT_w) for faster transforms.

    See:
    http://en.wikipedia.org/wiki/Walsh_matrix
    http://en.wikipedia.org/wiki/Fast_Hadamard_transform
    """
    # implementation is a proof of concept and EXTREMELY SLOW

    # determine the order and final size of input vectors
    ord = int(np.ceil(np.log2(data.shape[-1])))  # Walsh/Hadamard order
    max = 2 ** ord

    # zero fill to power of 2
    pad = max - data.shape[-1]
    zdata = zf(data, pad)

    # Multiply each vector by the hadamard matrix
    nat = np.zeros(zdata.shape, dtype=zdata.dtype)
    H = hadamard(max)
    nat = np.dot(zdata, H)
    nat = np.array(nat, dtype=data.dtype)

    # Bit-Reversal Permutation
    s = [int2bin(x, digits=ord)[::-1] for x in range(max)]
    brp = [bin2int(x) for x in s]
    brp_data = np.take(nat, brp, axis=-1)

    # Gray code permutation (bit-inverse)
    gp = gray(ord)
    gp_data = np.take(brp_data, gp, axis=-1)

    return gp_data
e46eb465e67ffe61872cdb321cbb642fb8a1a094
6,343
from typing import Dict

def most_repeated_character(string: str) -> str:
    """
    Find the most repeated character in a string.
    :param string:
    :return:
    """
    map: Dict[str, int] = {}
    for letter in string:
        if letter not in map:
            map[letter] = 1
        else:
            map[letter] += 1
    return sorted(map.items(), key=lambda item: item[1], reverse=True)[0][0]
c59a1e0a552f12c7561ecdb11530f98f15076cdc
6,344
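A minimal usage sketch for the most_repeated_character snippet above (illustrative, not part of the dataset row):
>>> most_repeated_character("banana")
'a'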
def transitions(bits):
    """Count the number of transitions in a bit sequence.

    >>> assert transitions([0, 0]) == 0
    >>> assert transitions([0, 1]) == 1
    >>> assert transitions([1, 1]) == 0
    >>> assert transitions([1, 0]) == 1

    >>> assert transitions([0, 0, 0]) == 0
    >>> assert transitions([0, 1, 0]) == 2
    >>> assert transitions([1, 1, 0]) == 1
    >>> assert transitions([1, 0, 0]) == 1
    >>> assert transitions([0, 0, 1]) == 1
    >>> assert transitions([0, 1, 1]) == 1
    >>> assert transitions([1, 1, 1]) == 0
    >>> assert transitions([1, 0, 1]) == 2
    """
    transitions = 0
    for i in range(0, len(bits) - 1):
        if bits[i] != bits[i + 1]:
            transitions += 1
    return transitions
bc65f7b57508fc0c34275c4794d73c106bce07fd
6,346
def _convert_code(code):
    """
    Convert a JoinQuant-style security code to the xalpha form

    :param code:
    :return:
    """
    no, mk = code.split(".")
    if mk == "XSHG":
        return "SH" + no
    elif mk == "XSHE":
        return "SZ" + no
11ffcde407da7afaaf0eb28a80244d85f5136199
6,347
def _is_arg_name(s, index, node):
    """Search for the name of the argument. Right-to-left."""
    if not node.arg:
        return False
    return s[index : index + len(node.arg)] == node.arg
b0c995ea553184f266fd968ad60b4c5fb19a55d4
6,348
import libtfr
import numpy as np

def goodness(signal, freq_range=None, D=None):
    """Compute the goodness of pitch of a signal."""
    if D is None:
        D = libtfr.dpss(len(signal), 1.5, 1)[0]
    signal = signal * D[0, :]
    if freq_range is None:
        freq_range = 256
    if np.all(signal == 0):
        return 0
    else:
        return np.max(cepstrum(signal)[25:freq_range])
00a44a373f56cd07570a89cef9b688f0aae4dd39
6,351
import socket
import fcntl
import struct

def get_ip_address(dev="eth0"):
    """Retrieves the IP address via SIOCGIFADDR - only tested on Linux."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # 0x8915 is SIOCGIFADDR; the interface name must be packed as bytes
        packed = struct.pack('256s', dev[:15].encode('utf-8'))
        return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, packed)[20:24])
    except Exception:
        return None
96f59f17937543ed9cd4652af4703eaf975b8069
6,353
def connect(**kwargs):  # pylint: disable=unused-argument
    """ mock get-a-connection """
    return MockConn()
a51dd696411e5572344313a73b89d0431bcb5bdf
6,354
def new_organization(request):
    """Creates a new organization."""
    if request.method == 'POST':
        new_organization_form = OrganizationForm(request.POST)
        if new_organization_form.is_valid():
            new_organization = new_organization_form.save(commit=False)
            new_organization.owner = request.user
            new_organization.save()
            new_organization.editors.add(request.user)
            return redirect(reverse('competencies:organizations'))
    new_organization_form = OrganizationForm()
    return render_to_response('competencies/new_organization.html',
                              {'new_organization_form': new_organization_form},
                              context_instance=RequestContext(request))
2ef5e47a3d42ef3c2ce2dee055cb5311f984496d
6,355
def _build_obs_freq_mat(acc_rep_mat):
    """
    build_obs_freq_mat(acc_rep_mat):

    Build the observed frequency matrix from an accepted replacements matrix.
    The acc_rep_mat matrix should be generated by the user.
    """
    # Note: acc_rep_mat should already be a half_matrix!!
    total = float(sum(acc_rep_mat.values()))
    obs_freq_mat = ObservedFrequencyMatrix(alphabet=acc_rep_mat.alphabet, build_later=1)
    for i in acc_rep_mat:
        obs_freq_mat[i] = acc_rep_mat[i] / total
    return obs_freq_mat
8a400ca64c3907ee8c09a5e33f9c45703f267d45
6,356
def strip(s):
    """strip(s) -> string

    Return a copy of the string s with leading and trailing
    whitespace removed.
    """
    i, j = 0, len(s)
    while i < j and s[i] in whitespace:
        i = i + 1
    while i < j and s[j-1] in whitespace:
        j = j - 1
    return s[i:j]
7edc91baf8e57e713b464060c05f954510219d34
6,357
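A usage sketch for the routine above; it assumes strip from this entry is in scope and that the module-level `whitespace` it consults comes from the standard string module.

from string import whitespace

print(repr(strip("  \thello world\n")))  # 'hello world'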
def test(seriesList):
    """This is a test function"""
    return seriesList
70eb3f5a518533e6243bed74931d5829c9546e2b
6,360
def perform_timeseries_analysis_iterative(dataset_in, intermediate_product=None, no_data=-9999):
    """
    Description:
    -----
    Input:
        dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
    Output:
        dataset_out (xarray.DataSet) - dataset containing variables: normalized_data, total_data, total_clean
    """

    data_vars = list(dataset_in.data_vars)
    key = data_vars[0]

    data = dataset_in[key].astype('float')

    processed_data = data.copy(deep=True)
    processed_data.values[data.values == no_data] = 0
    processed_data_sum = processed_data.sum('time')

    clean_data = data.copy(deep=True)
    clean_data.values[data.values != no_data] = 1
    clean_data.values[data.values == no_data] = 0
    clean_data_sum = clean_data.sum('time')

    if intermediate_product is None:
        processed_data_normalized = processed_data_sum / clean_data_sum
        processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
        dataset_out = xr.Dataset({'normalized_data': processed_data_normalized,
                                  'total_data': processed_data_sum,
                                  'total_clean': clean_data_sum},
                                 coords={'latitude': dataset_in.latitude,
                                         'longitude': dataset_in.longitude})
    else:
        dataset_out = intermediate_product.copy(deep=True)
        dataset_out['total_data'] += processed_data_sum
        dataset_out['total_clean'] += clean_data_sum
        processed_data_normalized = dataset_out['total_data'] / dataset_out['total_clean']
        processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
        dataset_out['normalized_data'] = processed_data_normalized

    return dataset_out
63e3211db70a2ae12db7d1d26a5dad89f308816f
6,361
def getDayOfYear(date):
    # type: (Date) -> int
    """Extracts the day of the year from a date. The first day of the year is day 1.

    Args:
        date: The date to use.

    Returns:
        An integer that is representative of the extracted value.
    """
    print(date)
    return _now().timetuple().tm_yday
25d7c150a4d7be2e6ae275b10b01e67517ba6cdb
6,362
def predict(network, X_test):
    """Takes the neural network's weight matrices and the test data as
    parameters, and returns the predictions (array) for the test data.

    Parameter X_test: array holding the 10,000 test images.
    """
    y_pred = []
    for sample in X_test:  # iterate over each image in the test set
        # Propagate the image through the network to compute the probability of each digit.
        sample_hat = forward(network, sample)
        # Find the index of the largest probability (-> the predicted value).
        sample_pred = np.argmax(sample_hat)
        y_pred.append(sample_pred)  # append the prediction to the result list
    return np.array(y_pred)
63ac50b7787c6dd89f04532b0a6266fa4d0f7012
6,363
def __sanitize_close_input(x, y):
    """
    Makes sure that both x and y are ht.DNDarrays.
    Provides copies of x and y distributed along the same split axis (if original split axes do not match).
    """

    def sanitize_input_type(x, y):
        """
        Verifies that x is either a scalar, or a ht.DNDarray.
        If a scalar, x gets wrapped in a ht.DNDarray.
        Raises TypeError if x is neither.
        """
        if not isinstance(x, dndarray.DNDarray):
            if np.ndim(x) == 0:
                dtype = getattr(x, "dtype", float)
                device = getattr(y, "device", None)
                x = factories.array(x, dtype=dtype, device=device)
            else:
                raise TypeError("Expected DNDarray or numeric scalar, input was {}".format(type(x)))
        return x

    x = sanitize_input_type(x, y)
    y = sanitize_input_type(y, x)

    # Do redistribution out-of-place
    # If only one of the tensors is distributed, unsplit/gather it
    if x.split is not None and y.split is None:
        t1 = manipulations.resplit(x, axis=None)
        return t1, y
    elif x.split != y.split:
        t2 = manipulations.resplit(y, axis=x.split)
        return x, t2
    else:
        return x, y
7f3cfc44a47493fcf18c179556c388f9d9e9c643
6,365
def table_information_one(soup, div_id_name: str = None) -> dict:
    """
    first method for bringing back table information as a dict.
    works on:
        parcelInfo
        SummaryPropertyValues
        SummarySubdivision
    """
    table = []
    for x in soup.find_all("div", {"id": div_id_name}):
        for div in x.find_all("div"):
            for row in x.find_all("tr"):
                cols = row.find_all("td")
                cols = [element.text.strip() for element in cols if element]
                table.extend(cols)
    it = iter(table)
    test_dict = dict(zip(it, it))
    if test_dict.get(""):
        del test_dict[""]
    return test_dict
3b317faff07bff028d43f20b7cfaa8afa587ca50
6,367
import functools


def Eval_point_chan(state, chan, data):
    """External validity, along a channel, where point-data is pulled back
    along the channel
    """
    # for each element, state.sp.get(*a), of the codomain
    vals = [(chan >> state)(*a) ** data(*a) for a in data.sp.iter_all()]
    val = functools.reduce(lambda p1, p2: p1 * p2, vals, 1)
    return val
99355101853f3caa5c75b7e3f47aa5439a11aef1
6,368
import pyranges as pr


def dfi2pyranges(dfi):
    """Convert dfi to pyranges

    Args:
      dfi: pd.DataFrame returned by `load_instances`
    """
    dfi = dfi.copy()
    dfi['Chromosome'] = dfi['example_chrom']
    dfi['Start'] = dfi['pattern_start_abs']
    dfi['End'] = dfi['pattern_end_abs']
    dfi['Name'] = dfi['pattern']
    dfi['Score'] = dfi['contrib_weighted_p']
    dfi['Strand'] = dfi['strand']
    return pr.PyRanges(dfi)
98ce4fbac93f81a6022d1cc012ca5270d7d681f3
6,369
def cleared_nickname(nick: str) -> str:
    """Perform nickname clearing on given nickname"""
    if nick.startswith(('+', '!')):
        nick = nick[1:]
    if nick.endswith('#'):
        nick = nick[:-1]
    if all(nick.rpartition('(')):
        nick = nick.rpartition('(')[0]
    return nick
f3a5c838f0518a929dfa8b65f83a1d4c6e6dbbe4
6,370
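A short usage sketch (assumes cleared_nickname from this entry is in scope); the nicknames are invented to show each clearing rule in turn.

print(cleared_nickname("+alice"))       # 'alice' -- leading '+' stripped
print(cleared_nickname("bob#"))         # 'bob'   -- trailing '#' stripped
print(cleared_nickname("carol(away)"))  # 'carol' -- '(...)' suffix dropped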
def validate_model_on_lfw(
    strategy,
    model,
    left_pairs,
    right_pairs,
    is_same_list,
) -> float:
    """Validates the given model on the Labeled Faces in the Wild dataset.

    ### Parameters
        model: The model to be tested.
        dataset: The Labeled Faces in the Wild dataset, loaded from load_lfw\
 function.
        pairs: List of LFW pairs, loaded from load_lfw_pairs function.

    ### Returns
        (accuracy_mean, accuracy_std, validation_rate, validation_std, far,\
 auc, eer) - Accuracy Mean, Accuracy Standard Deviation, Validation Rate,\
 Validation Standard Deviation, FAR, Area Under Curve (AUC) and Equal Error\
 Rate (EER).
    """
    embeddings, is_same_list = _get_embeddings(
        strategy,
        model,
        left_pairs,
        right_pairs,
        is_same_list,
    )

    tpr, fpr, accuracy, val, val_std, far = evaluate(embeddings, is_same_list)
    auc = metrics.auc(fpr, tpr)
    eer = brentq(lambda x: 1.0 - x - interpolate.interp1d(fpr, tpr)(x), 0.0, 1.0)

    return np.mean(accuracy), np.std(accuracy), val, val_std, far, auc, eer
ead4ed84c53b0114c86ecf928d44114b5d896373
6,371
import asyncio


def nlu_audio(settings, logger):
    """Wrapper for NLU audio"""
    speech_args = settings['speech']
    loop = asyncio.get_event_loop()
    interpretations = {}
    with Recorder(loop=loop) as recorder:
        interpretations = loop.run_until_complete(understand_audio(
            loop,
            speech_args['url'],
            speech_args['app_id'],
            unhexlify(speech_args['app_key']),
            # context_tag=credentials['context_tag'],
            "master",
            speech_args['language'],
            recorder=recorder,
            logger=logger))
    # loop.close()
    if interpretations is False:
        # The user did not speak
        return {}
    else:
        return interpretations
4d9c5eeacc0c1c36cad4575cc774b3faded33c23
6,373
def _gauss(sigma, n_sigma=3):
    """Discrete, normalized Gaussian centered on zero. Used for filtering data.

    Args:
        sigma (float): standard deviation of Gaussian
        n_sigma (float): extend x in each direction by ext_x * sigma

    Returns:
        ndarray: discrete Gaussian curve
    """
    x_range = n_sigma * sigma
    x = np.arange(-x_range, x_range + 1e-5, 1, dtype=float)
    y = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x / sigma)**2)
    return y
baea764c15c33d99f26bbe803844e06df97d908e
6,374
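A quick sanity check of the discrete Gaussian above; it assumes _gauss from this entry and numpy are in scope. With sigma=2 the kernel spans -6..6 in unit steps and its samples sum to roughly 1.

import numpy as np

kernel = _gauss(2)
print(len(kernel))                       # 13 samples
print(round(float(np.sum(kernel)), 3))   # ~1.0 (approximately normalized)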
def create_tentacle_mask(points, height, width, buzzmobile_width, pixels_per_m):
    """Creates a mask of a tentacle by drawing the points as a line."""
    tentacle_mask = np.zeros((height, width), np.uint8)
    for i in range(len(points) - 1):
        pt1 = points[i]
        pt2 = points[i+1]
        cv2.line(tentacle_mask, pt1, pt2, [255, 255, 255],
                 int(buzzmobile_width * pixels_per_m))
    return tentacle_mask
764251efff298f4b1990910d04c31ea9ed1760fd
6,375
def _matrix_to_real_tril_vec(matrix):
    """Parametrize a positive definite hermitian matrix using its Cholesky decomposition"""
    tril_matrix = la.cholesky(matrix, lower=True)
    diag_vector = tril_matrix[np.diag_indices(tril_matrix.shape[0])].astype(float)
    complex_tril_vector = tril_matrix[np.tril_indices(tril_matrix.shape[0], -1)]
    real_tril_vector = _complex_to_real(complex_tril_vector)
    return np.concatenate((diag_vector, real_tril_vector))
c59a0cd9fde6d77619a9681c3989efc9d704c07b
6,376
def max_pool(pool_size, strides, padding='SAME', name=None):
    """max pooling layer"""
    return tf.layers.MaxPooling2D(pool_size, strides, padding, name=name)
5259160d3f2955b16e039482e7c51cb2f6d777e9
6,378
def size_as_minimum_int_or_none(size):
    """
    :return: int, size as minimum int or None. For example:
        - size = no value, will return: None
        - size = simple int value of 5, will return: 5
        - size = timed interval(s), like "2@0 22 * * *:24@0 10 * * *", will return: 2
    """
    return min(size_as_recurrence_map(size).values())
742dc4f2d175a9372cc60e73dad21da9e927dc0c
6,379
import torch


def args_to_numpy(args):
    """Converts all Torch tensors in a list to NumPy arrays

    Args:
        args (list): list containing QNode arguments, including Torch tensors

    Returns:
        list: returns the same list, with all Torch tensors converted to NumPy arrays
    """
    res = []

    for i in args:
        if isinstance(i, torch.Tensor):
            if i.is_cuda:  # pragma: no cover
                res.append(i.cpu().detach().numpy())
            else:
                res.append(i.detach().numpy())
        else:
            res.append(i)

    # if NumPy array is scalar, convert to a Python float
    res = [i.tolist() if (isinstance(i, np.ndarray) and not i.shape) else i for i in res]

    return res
fbf01c2ea236cc11f7b1d7a835b0a0ba338ba153
6,380
def optimizer_setup(model, params):
    """ creates optimizer, can have layer specific options """
    if params.optimizer == 'adam':
        if params.freeze_backbone:
            optimizer = optimizer_handler.layer_specific_adam(model, params)
        else:
            optimizer = optimizer_handler.plain_adam(model, params)
    elif params.optimizer == 'sgd':
        if params.freeze_backbone:
            optimizer = optimizer_handler.layer_specific_sgd(model, params)
        else:
            optimizer = optimizer_handler.plain_sgd(model, params)

    if params.zero_bn_bias_decay:
        optimizer = zero_wdcay_bn_bias(optimizer)

    return optimizer
c58427d7da66a02c2a44f92cb7d6350e2b9a83fd
6,381
def charToEmoji(char, spaceCounter=0):
    """
    If you insert a space, make sure you have your own space counter and increment it.
    Space counter goes from 0 to 3.
    """
    if char in emojitable.table:
        print(char)
        if char == ' ':
            emoji = emojitable.table[char][spaceCounter]
        else:
            emoji = emojitable.table[char]
        return emoji
4943152d932f1529af86cdff827ff069f173fcb3
6,382
def averages_area(averages):
    """
    Computes the area of the polygon formed by the hue bin averages.

    Parameters
    ----------
    averages : array_like, (n, 2)
        Hue bin averages.

    Returns
    -------
    float
        Area of the polygon.
    """

    N = averages.shape[0]

    triangle_areas = np.empty(N)
    for i in range(N):
        u = averages[i, :]
        v = averages[(i + 1) % N, :]
        triangle_areas[i] = (u[0] * v[1] - u[1] * v[0]) / 2

    return np.sum(triangle_areas)
62ef194172095a9e7ddd6b9cb0cccb6d95fb2c4a
6,383
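A sanity check for the shoelace-style area computation above; it assumes averages_area from this entry and numpy are in scope, and uses a unit square (counter-clockwise) whose area should come out as 1.0.

import numpy as np

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(averages_area(square))  # 1.0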
def _tree_cmp(fpath1: PathLike, fpath2: PathLike, tree_format: str = 'newick') -> bool:
    """Returns True if trees stored in `fpath1` and `fpath2` are equivalent, False otherwise.

    Args:
        fpath1: First tree file path.
        fpath2: Second tree file path.
        tree_format: Tree format, i.e. ``newick``, ``nexus``, ``phyloxml`` or ``nexml``.

    """
    ref_tree = Phylo.read(fpath1, tree_format)
    target_tree = Phylo.read(fpath2, tree_format)
    # Both trees are considered equal if they have the same leaves and the same distance from each to the root
    ref_dists = {leaf.name: ref_tree.distance(leaf) for leaf in ref_tree.get_terminals()}
    target_dists = {leaf.name: target_tree.distance(leaf) for leaf in target_tree.get_terminals()}
    return ref_dists == target_dists
0c42386b94d9bf6c157b0d60593413074af772f4
6,384
from typing import Sequence


def parse_genemark(input_f, genbank_fp):
    """ Extract atypical genes identified by GeneMark

    Parameters
    ----------
    input_f: string
        file descriptor for GeneMark output gene list (*.lst)
    genbank_fp: string
        file path to genome in GenBank format

    Notes
    -----
    genbank_fp is the intermediate GenBank file generated by reformat_input.py,
    in which multiple sequences are concatenated, instead of the original
    GenBank file.

    Returns
    -------
    output: string
        gene names (protein_ids) separated by newline
    """
    genes = {}
    gb = Sequence.read(genbank_fp, format='genbank')
    for feature in gb.interval_metadata._intervals:
        m = feature.metadata
        if m['type'] == 'CDS' and 'protein_id' in m:
            protein_id = m['protein_id'].replace('\"', '')
            if protein_id not in genes:
                strand = m['strand']
                start = feature.bounds[0][0] + 1
                end = feature.bounds[0][1]
                genes[protein_id] = (start, end, strand)
    atypical_genes = []
    reading = False
    for line in input_f:
        x = line.strip().split()
        if len(x) == 2 and x == ['#', 'Length']:
            reading = True
        # atypical genes have class '2' in the 6th column
        elif reading and len(x) == 6 and x[5] == '2':
            (start, end, strand) = (int(x[2].lstrip('<>')),
                                    int(x[3].lstrip('<>')),
                                    x[1])
            for (gene, x) in genes.items():
                if x[0] == start and x[1] == end and x[2] == strand:
                    atypical_genes.append(gene)
    return '\n'.join(sorted(atypical_genes))
3621d81eedad83f66a1be405bc49c2da3ea520d9
6,385
def get_hex(fh, nbytes=1):
    """ get nbyte bytes (1 by default) and display as hexadecimal """
    hstr = ""
    for i in range(nbytes):
        # read one byte from the file handle and format it as two hex digits
        b = "%02X " % ord(fh.read(1))
        hstr += b
    return hstr
b1d426f7bfcceffa829c9dcc1150f32be5c48413
6,386
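A hedged usage sketch, assuming fh is a binary file handle (here an in-memory buffer) and that get_hex above reads one byte per iteration; the byte values are arbitrary.

import io

buf = io.BytesIO(b"\x0a\xff\x10")
print(get_hex(buf, 3))  # '0A FF 10 '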
def fetch(pages, per_page, graph):
    """ Get a list of posts from facebook """
    return [x.replace('\n', '') for name in pages
            for x in fetch_page(name, per_page, graph)]
ea9af2e1d2fe9c2880aebd1148cb8f6457f55bb2
6,387
def lifted_struct_loss(labels, embeddings, margin=1.0):
    """Computes the lifted structured loss.

    Args:
      labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
        multiclass integer labels.
      embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
        not be l2 normalized.
      margin: Float, margin term in the loss definition.

    Returns:
      lifted_loss: tf.float32 scalar.
    """
    # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
    lshape = tf.shape(labels)
    labels = tf.reshape(labels, [lshape[0], 1])

    # Build pairwise squared distance matrix.
    pairwise_distances = metric_learning.pairwise_distance(embeddings)

    # Build pairwise binary adjacency matrix.
    adjacency = tf.math.equal(labels, tf.transpose(labels))
    # Invert so we can select negatives only.
    adjacency_not = tf.math.logical_not(adjacency)

    batch_size = tf.size(labels)

    diff = margin - pairwise_distances
    mask = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
    # Safe maximum: Temporarily shift negative distances
    #   above zero before taking max.
    #   this is to take the max only among negatives.
    row_minimums = tf.math.reduce_min(diff, 1, keepdims=True)
    row_negative_maximums = tf.math.reduce_max(
        tf.math.multiply(diff - row_minimums, mask), 1,
        keepdims=True) + row_minimums

    # Compute the loss.
    # Keep track of matrix of maximums where M_ij = max(m_i, m_j)
    #   where m_i is the max of alpha - negative D_i's.
    # This matches the Caffe loss layer implementation at:
    #   https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp  # pylint: disable=line-too-long

    max_elements = tf.math.maximum(row_negative_maximums,
                                   tf.transpose(row_negative_maximums))
    diff_tiled = tf.tile(diff, [batch_size, 1])
    mask_tiled = tf.tile(mask, [batch_size, 1])
    max_elements_vect = tf.reshape(tf.transpose(max_elements), [-1, 1])

    loss_exp_left = tf.reshape(
        tf.math.reduce_sum(
            tf.math.multiply(
                tf.math.exp(diff_tiled - max_elements_vect), mask_tiled),
            1,
            keepdims=True), [batch_size, batch_size])

    loss_mat = max_elements + tf.math.log(loss_exp_left +
                                          tf.transpose(loss_exp_left))
    # Add the positive distance.
    loss_mat += pairwise_distances

    mask_positives = tf.cast(
        adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
            tf.ones([batch_size]))

    # *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
    num_positives = tf.math.reduce_sum(mask_positives) / 2.0

    lifted_loss = tf.math.truediv(
        0.25 * tf.math.reduce_sum(
            tf.math.square(
                tf.math.maximum(
                    tf.math.multiply(loss_mat, mask_positives), 0.0))),
        num_positives)
    return lifted_loss
ac6de39c7b4fc204dbf46a716f796434da959134
6,388
def get_uint8_rgb(dicom_path):
    """ Reads dicom from path and returns rgb uint8 array where
    R: min-max normalized, G: CLAHE, B: histogram equalized.
    Image size remains original.
    """
    dcm = _read_dicom_image(dicom_path)
    feats = _calc_image_features(dcm)
    return (feats*255).astype(np.uint8)
a81362af49a8c93c2e0224f033260ed3a0e5931f
6,389
def query_db_cluster(instanceid):
    """ Querying whether DB is Clustered or not """
    try:
        db_instance = RDS.describe_db_instances(
            DBInstanceIdentifier=instanceid
        )
        return db_instance['DBInstances'][0]['DBClusterIdentifier']
    except KeyError:
        return False
6b84db4ff0b3788085ca4313aa397a6bd675e696
6,391
from typing import Callable
from re import T


def is_nsfw() -> Callable[[T], T]:
    """A :func:`.check` that checks if the channel is a NSFW channel.

    This check raises a special exception, :exc:`.ApplicationNSFWChannelRequired`
    that is derived from :exc:`.ApplicationCheckFailure`.

    .. versionchanged:: 2.0

        Raise :exc:`.ApplicationNSFWChannelRequired` instead of generic
        :exc:`.ApplicationCheckFailure`. DM channels will also now pass this check.
    """

    def pred(ctx: ApplicationContext) -> bool:
        ch = ctx.channel
        if ctx.guild is None or (isinstance(ch, (discord.TextChannel, discord.Thread)) and ch.is_nsfw()):
            return True
        raise ApplicationNSFWChannelRequired(ch)  # type: ignore

    return check(pred)
5941b74e55c43597f3c3e367434c3a0b54d92209
6,392
def calc_MAR(residuals, scalefactor=1.482602218):
    """Return median absolute residual (MAR) of input array.
    By default, the result is scaled to the normal distribution."""
    return scalefactor * np.median(np.abs(residuals))
1691bf7883310562f4ee9e84d07c1fa188fe306b
6,393
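A usage sketch for the scaled median absolute residual above; it assumes calc_MAR from this entry and numpy are in scope. For residuals drawn from a zero-mean, unit-variance normal distribution, the scaled value should approach 1.

import numpy as np

rng = np.random.default_rng(0)
residuals = rng.normal(loc=0.0, scale=1.0, size=100000)
print(round(calc_MAR(residuals), 2))  # close to 1.0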
def resourceimport_redirect():
    """
    Returns a redirection action to the main resource importing view, which is
    a list of files available for importing.

    Returns:
        The redirection action.
    """
    return redirect(url_for('resourceimportfilesview.index'))
be5c6e7ef9fcc5c369d31a75960c2849cede2b5f
6,394
def gen_fov_chan_names(num_fovs, num_chans, return_imgs=False, use_delimiter=False):
    """Generate fov and channel names

    Names have the format 'fov0', 'fov1', ..., 'fovN' for fovs and 'chan0', 'chan1', ...,
    'chanM' for channels.

    Args:
        num_fovs (int):
            Number of fov names to create
        num_chans (int):
            Number of channel names to create
        return_imgs (bool):
            Return 'chanK.tiff' as well if True. Default is False
        use_delimiter (bool):
            Appends '_otherinfo' to the first fov. Useful for testing fov id extraction
            from filenames. Default is False

    Returns:
        tuple (list, list) or (list, list, list):
            If return_imgs is False, only fov and channel names are returned
            If return_imgs is True, image names will also be returned
    """
    fovs = [f'fov{i}' for i in range(num_fovs)]
    if use_delimiter:
        fovs[0] = f'{fovs[0]}_otherinfo'
    chans = [f'chan{i}' for i in range(num_chans)]

    if return_imgs:
        imgs = [f'{chan}.tiff' for chan in chans]
        return fovs, chans, imgs
    else:
        return fovs, chans
417490259c42a52c58aab418fbb63185602e6750
6,396
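A short usage sketch (assumes gen_fov_chan_names from this entry is in scope) showing the names produced for two fovs and three channels.

fovs, chans, imgs = gen_fov_chan_names(2, 3, return_imgs=True)
print(fovs)   # ['fov0', 'fov1']
print(chans)  # ['chan0', 'chan1', 'chan2']
print(imgs)   # ['chan0.tiff', 'chan1.tiff', 'chan2.tiff']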
def get_supported():
    """
    Returns a list of hints supported by the window manager.

    :return: A list of atoms in the _NET_SUPPORTED property.
    :rtype: util.PropertyCookie (ATOM[]/32)
    """
    return util.PropertyCookie(util.get_property(root, '_NET_SUPPORTED'))
038e7d74cd6cdf2a0dc1d04a5e54b312a1a44b0e
6,397
def get_exportables():
    """Get all exportable models except snapshot"""
    exportables = set(converters.get_exportables().values())
    exportables.discard(all_models.Snapshot)
    return exportables
21dddb65f0193d02aae47d88a2743b9c92b3b245
6,398
import time


def playback(driver, settings, record, output, mode=None):  # pylint: disable=W0621,R0912
    """
    Playback a given test.
    """
    if settings.desc:
        output("%s ... " % settings.desc, flush=True)
    else:
        output("Playing back %s ... " % settings.name, flush=True)

    _begin_browsing(driver, settings)
    wait_until_loaded(driver)
    state = states.OK
    err = None
    mode = mode or modes.PLAYBACK
    try:
        for step in record.steps:
            step.delayer(driver)
            timeout = 0
            while timeout < 40:
                timeout += 1
                if not driver.execute_script(js.isPageChanging(250)):  # milliseconds
                    step.execute(driver, settings, mode)
                    break
                else:
                    time.sleep(0.25)
            if timeout == 40:
                raise exc.PlaybackTimeout(
                    '%s timed out while waiting for the page to be static.'
                    % settings.name
                )
    except Exception as exception:  # pylint: disable=W0703
        if isinstance(exception, exc.ScreenshotsDiffer):
            state = states.FAIL
            err = exception
        else:
            state = states.ERROR
            if hasattr(exception, 'msg') and (exception.msg.startswith('element not visible')
                    or exception.msg.startswith('Element is not currently visible')):
                err = exc.ElementNotVisible(
                    "Element was not visible when expected during playback. If "
                    "your playback depended on a significant rerender having been "
                    "done, then make sure you've waited until nothing is changing "
                    "before taking a screenshot."
                )
            else:
                err = exception

    output('%s' % str(state))
    if err:
        output(': %s' % str(err))
    return (state, err)
36cd718760b76100a9959a1568b8c21f2ea7e334
6,399