Columns: content (string, 39 to 14.9k characters) · sha1 (string, 40 characters) · id (int64, 0 to 710k)
def get_mdot_code(rho, ucon, gdet, dx2, dx3):
    """Returns dMact in code units."""
    dMacts = gdet * rho * ucon[:, :, :, 1]
    return dMacts[:21].sum() * dx2 * dx3 / 21.
6da5dc0205a7c8a57af473dcaee388fa0f84b0ff
696,621
import json


def j2d(data):
    """Convert json to dict.

    Parameters
    ----------
    data : string
        JSON-formatted string.

    Returns
    -------
    dict
        Data as dict.
    """
    return json.loads(data)
31abd9bbf8b0eecf2fb2cd12310bb1642ec209c8
696,623
def read_vx(pointdata):
    """Read a variable-length index."""
    if pointdata[0] != 255:
        index = pointdata[0] * 256 + pointdata[1]
        size = 2
    else:
        index = pointdata[1] * 65536 + pointdata[2] * 256 + pointdata[3]
        size = 4
    return index, size
cc3c2335430bcff12a2824013b450a8a71ce9379
696,624
def are_passwords_matching(form) -> bool:
    """
    Checks whether password field and password confirm field have same value

    :param form: filled Django form instance
    :return: true if fields have same value, false otherwise
    """
    return form.cleaned_data['password'] == form.cleaned_data['password_confirm']
02067f5ff2f9914dfdf6dd81011b15e6007db457
696,627
import torch


def neginf(dtype):
    """
    Return a representable finite number near -inf for a dtype.
    """
    if dtype is torch.float16:
        return -65504
    else:
        return -1e20
b6834f7cf2c25d60b679bcbda32e3473cc20b9de
696,634
def zeros_mat(r, c):
    """
    r by c matrix full of zeroes

    :param r: rows of matrix
    :param c: columns of matrix
    :return: 2D array of the matrix (list containing elements of each row)
    """
    A = []
    while len(A) < r:
        A.append([])  # generate a new row
        while len(A[-1]) < c:  # fill the new row until it has c columns
            A[-1].append(0)  # initialize every column value to 0
    return A
4ec8e1a3457db91d0ef69b9ad757378ab477fa30
696,638
def calculate_yearly_total(sales: list[list[str]]) -> int:
    """Calculate the total yearly sales."""
    total: int = 0
    for row in sales:
        total += int(row[-1])
    return total
dbf5fbd038d0e7b2fad7e6794803b919e5902518
696,640
import tokenize


def bracket_delta(token):
    """Returns +/-1 if the current token increases/decreases bracket nesting depth, 0 otherwise."""
    if token[0] == tokenize.OP and token[1] in ['{', '(', '[']:
        return 1
    elif token[0] == tokenize.OP and token[1] in ['}', ')', ']']:
        return -1
    else:
        return 0
0c225259482bad2cd5470b69f2c232bff1d421e2
696,643
import torch def mapping_shtools_to_compact(lmax): """ pyshtools uses an output format to represent spherical harmonic coefficients that is not memory efficient. This function creates a mapping to represent the coefficients differently. Our representation Y(l, m), shape(lmax+1, lmax+1) : Y(0, 0) Y(1, 1) Y(2, 2) ... Y(1,-1) Y(1, 0) Y(2, 1) ... Y(2,-2) Y(2,-1) Y(2, 0) ... ... ... ... Example : mapping = mapping_shtools_to_compact(lmax) x, _ = SHExpandLSQ(d, phi, theta, lmax, [1, -1]) # pyshtools y = torch.tensor(x)[mapping[..., 0], mapping[..., 1], mapping[..., 2]] # Compact z = torch.zeros([2, lmax+1, lmax+1]) z[mapping[..., 0], mapping[..., 1], mapping[..., 2]] = y # Back to pyshtools """ mapping = torch.zeros([lmax + 1, lmax + 1, 3], dtype=torch.long) mapping[..., 0] = torch.tril(torch.ones([lmax + 1, lmax + 1], dtype=torch.long)) - torch.eye(lmax + 1, dtype=torch.long) linspace = torch.linspace(0, lmax, lmax + 1, dtype=torch.long) mapping[..., 1] = torch.triu(linspace.view(1, -1).expand(lmax + 1, lmax + 1)) \ + torch.tril(linspace.view(-1, 1).expand(lmax + 1, lmax + 1) - torch.diag(linspace)) mapping[..., 2] = torch.abs(linspace.view(1, -1).expand(lmax + 1, lmax + 1) - linspace.view(-1, 1).expand(lmax + 1, lmax + 1)) return mapping
48c821c74f24f66af367f40204301badde71b379
696,648
from bs4 import BeautifulSoup


def get_corpus_file_soup(corpus_filename):
    """
    For a given corpus xml filename, return its BeautifulSoup soup.
    """
    return BeautifulSoup(open(corpus_filename), 'xml')
6327cfc185e1ac1c7372b4879dcbcf62c0689a89
696,649
def items(dct_or_lst):
    """Returns list items, or dictionary items"""
    if isinstance(dct_or_lst, dict):
        return list(dct_or_lst.items())
    return list(enumerate(dct_or_lst))
7526f98214de1b76cc622d98abf4e0d5d3e003eb
696,650
def vec(A):
    """Return the vectorized matrix A by stacking its columns."""
    return A.reshape(-1, order="F")
668bb854406b7a5d7442b66a7de4c3222e25386c
696,655
def method_withBadName_with_parameters_on_multiple_lines(x,
                                                         y):
    """Provide parameters on multiple lines test case."""
    return x + y
a7fee09cd434d646d9eaa70c5cb5d7fb37f68b4e
696,657
def invalid_fg_vsby(s, v):
    """Checks if visibility is inconsistent with FG"""
    # NWSI 10-813, 1.2.6
    if len(s) == 2 or s.startswith('FZ'):
        if v > 0.6:
            return True
    elif s.startswith('MI'):
        if v < 0.6:
            return True
    return False
2b4fb86c19deef2893b8dfa0b55cd13ea2749e6f
696,659
def getExcludes(backendName):
    """ getExcludes(backendName)

    Get a list of excludes. If using the 'wx' backend, you don't want all the
    qt4 libraries. backendName is the name of the backend which you do want to use.
    """
    # init
    excludes = []

    # Neglect qt4
    if 'qt4' != backendName:
        excludes.extend(["sip", "PyQt4", "PyQt4.QtCore", "PyQt4.QtGui"])

    # Neglect wx
    if 'wx' != backendName:
        excludes.extend(["wx"])

    # done
    return excludes
d5c53034bdbf12af5292bc2f77b74eb2ba2e2ef1
696,668
import yaml


def read_config_file(config_file):
    """
    Read the yaml config file specified.

    :param config_file: path to config file.
    :return: Object representing contents of config file.
    """
    config_file_data = {}
    if config_file:
        with open(config_file) as file_handle:
            config_file_data = yaml.safe_load(file_handle)
    return config_file_data
9f21bfac3e8ac772782aafbc711b8cb04e2580bc
696,671
def username_to_file(username):
    """ Return the network file name according to the username """
    return '{0}.gml'.format(username)
4cca884c92f07f18427a68bc28fe9d25f9ec94f7
696,672
import colorsys


def hsv_to_rgb(h, s, v):
    """
    Convert an HSV tuple to an RGB hex string.

    h -- hue (0-360)
    s -- saturation (0-100)
    v -- value (0-255)

    Returns a hex RGB string, i.e. #123456.
    """
    r, g, b = tuple(int(i * 255) for i in colorsys.hsv_to_rgb(h / 360, s / 100, v / 255))
    return '#{:02X}{:02X}{:02X}'.format(r, g, b)
48bb7e31f16b6c435094aa990bb4d02f06cc37f7
696,673
import fcntl


def lockFile(lockfile):
    """Attempt to create lock file or fail if already locked"""
    fp = open(lockfile, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    return True
76fb74f2b5218834b66f017857bf7d0d087533b3
696,676
def depends(*dependencies):
    """Decorator function for marking fixture dependencies of a function.

    Example:
        .. code:: python

            from rok.fixtures import fixture, depends

            @depends("engine")
            def fetch_records(engine):
                # Do something with the engine
                ...

            # Fixtures themselves can also depend on other fixtures

            @fixture
            @depends("config")
            def engine(config):
                return create_engine(config=config)

            @fixture
            def config():
                return load_config()

    Args:
        *dependencies: Fixtures the decorated function depends on

    Returns:
        callable: Decorator for explicitly marking function dependencies.
    """
    def decorator(func):
        if not hasattr(func, "depends"):
            func.depends = []
        func.depends.extend(dependencies)
        return func

    return decorator
6b8a7b8f24705b9f545933ffc580788062752fb3
696,677
import torch def so3_rotation_angle(R, eps: float = 1e-4, cos_angle: bool = False): """ Calculates angles (in radians) of a batch of rotation matrices `R` with `angle = acos(0.5 * (Trace(R)-1))`. The trace of the input matrices is checked to be in the valid range `[-1-eps,3+eps]`. The `eps` argument is a small constant that allows for small errors caused by limited machine precision. Args: R: Batch of rotation matrices of shape `(minibatch, 3, 3)`. eps: Tolerance for the valid trace check. cos_angle: If==True return cosine of the rotation angles rather than the angle itself. This can avoid the unstable calculation of `acos`. Returns: Corresponding rotation angles of shape `(minibatch,)`. If `cos_angle==True`, returns the cosine of the angles. Raises: ValueError if `R` is of incorrect shape. ValueError if `R` has an unexpected trace. """ N, dim1, dim2 = R.shape if dim1 != 3 or dim2 != 3: raise ValueError("Input has to be a batch of 3x3 Tensors.") rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2] if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any(): raise ValueError( "A matrix has trace outside valid range [-1-eps,3+eps]." ) # clamp to valid range rot_trace = torch.clamp(rot_trace, -1.0, 3.0) # phi ... rotation angle phi = 0.5 * (rot_trace - 1.0) if cos_angle: return phi else: return phi.acos()
8b491db2fb5dfe8ffbeada503316d425e0df580c
696,679
def has_options(cli):
    """
    Checks if the cli command contains any options (e.g. --regex $REGEX).
    """
    for item in cli:
        if '--' in item:
            return True
    return False
bc7b7400fc810d7195f03ba2805112645540ee63
696,681
def end_file(current_output):
    """End an output file.

    This is smart enough to do nothing if current_output is None.

    @param current_output: the current file output (or None).
    @returns: None, to represent the closed stream.
    """
    if current_output:
        # write the iati-activities end tag
        current_output.write("</iati-activities>\n")
        # close the output
        current_output.close()
    return None
5b974790ad78038654080f8b0b11a183e4b1513b
696,683
def getChanceAgreement(l1, l2):
    """
    Returns p_e, the probability of chance agreement:
    (1/N^2) * sum(n_k1 * n_k2) for rater1, rater2, k categories
    (i.e. two in this case, 0 or 1), for two binary lists L1 and L2
    """
    assert(len(l1) == len(l2))
    summation = 0
    for label in [0, 1]:
        summation += l1.count(label) * l2.count(label)
    return (1 / float(len(l1)**2)) * summation
a4d11255ab8607c62a140b4bbd145b73b1e18524
696,684
def minimum_absolute_difference(arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/minimum-absolute-difference-in-an-array/problem

    Given an array of integers, find and print the minimum absolute difference between any two elements in the array.

    Solve:
    Sort the array, and then compare the difference between each pair of adjacent values. After sorting, we know
    that the minimum absolute difference has to be between two values that are stored sequentially in the list,
    so we simply find the smallest difference and return that.

    Args:
        arr: Array of integers to check

    Returns:
        int: The minimum absolute difference between two elements of the array
    """
    arr.sort()
    min_diff = arr[-1] - arr[0]
    for i in range(len(arr) - 1):
        if arr[i + 1] - arr[i] < min_diff:
            min_diff = arr[i + 1] - arr[i]
    return min_diff
00dc3ce179282b669407ea3a94fea07538a404d9
696,686
def find_dead_end_reactions(model): """ Identify reactions that are structurally prevented from carrying flux (dead ends). """ stoichiometries = {} for reaction in model.reactions: for met, coef in reaction.metabolites.items(): stoichiometries.setdefault(met.id, {})[reaction] = coef blocked_reactions = set() while True: new_blocked = set() for met_id, stoichiometry in stoichiometries.items(): if len(stoichiometry) == 1: # Metabolite is only associated with 1 reaction, which can thus not be active new_blocked.add(list(stoichiometry)[0]) if len(new_blocked) == 0: break # No more blocked reactions # Remove blocked reactions from stoichiometries stoichiometries = { met_id: {reac: coef for reac, coef in stoichiometry.items() if reac not in new_blocked} for met_id, stoichiometry in stoichiometries.items()} blocked_reactions.update(new_blocked) return frozenset(blocked_reactions)
c60af7b0a1c2813e57c599ddb5516ec1fe8d2aa2
696,687
def de_punc(s, punc=None, no_spaces=True, char='_'):
    """Remove punctuation and/or spaces in strings and replace with underscores or nothing

    Parameters
    ----------
    s : string
        input string to parse
    punc : string
        A string of characters to replace ie. '@ "!\'\\[]'
    no_spaces : boolean
        True, replaces spaces with underscore. False, leaves spaces
    char : string
        Replacement character
    """
    if (punc is None) or not isinstance(punc, str):
        punc = '!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~'  # _ removed
    if no_spaces:
        punc = " " + punc
    s = "".join([[i, char][i in punc] for i in s])
    return s
670a1bdc19fb65684e0f334542e5734cf06a636e
696,688
def GetTypeFromSoappyService(type_name, ns, soappy_service):
    """Digs in a SOAPpy service proxy and returns the object representing a type.

    Args:
        type_name: string The name of the WSDL-defined type to search for.
        ns: string The namespace the given WSDL-defined type belongs to.
        soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
            the information stored in the WSDL.

    Returns:
        mixed The object created by SOAPpy representing the given type. May be
        either a SOAPpy.wstools.XMLSchema.SimpleType or
        SOAPpy.wstools.XMLSchema.ComplexType object.
    """
    return soappy_service.wsdl.types[ns].types[type_name]
6aa77925fe4d24020bb5860b370192a3fd0675e8
696,689
def equalizer(n: int, m: int, total: int):
    """
    Receives total, m and n [0..total]
    Returns a tuple (a, b) so that their sum -> total, and a / b -> 1
    """
    oddity = total % 2
    smallest = min(n, m, total // 2 + oddity)
    if smallest == n:
        return (n, min(m, total - n))
    elif smallest == m:
        return (min(n, total - m), m)
    else:
        return (total // 2, total // 2 + oddity)
5f1a6177c9d728a00735f3330be3a4f5772bd283
696,690
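A few hypothetical calls (not part of the dataset row above) illustrating how equalizer splits a total between the two caps n and m:

# assumes the equalizer function defined above
equalizer(3, 9, 10)   # -> (3, 7): n caps the first value, the remainder goes to the second
equalizer(8, 9, 10)   # -> (5, 5): neither n nor m is limiting, so the total is split evenly
equalizer(8, 9, 11)   # -> (5, 6): odd total, the extra unit goes to the second value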
def k_neighbors(kdtree, k):
    """ Get indices of K nearest neighbors for each point

    Parameters
    ----------
    kdtree: pyntcloud.structures.KDTree
        The KDTree built on top of the points in point cloud
    k: int
        Number of neighbors to find

    Returns
    -------
    k_neighbors: (N, k) array
        Where N = kdtree.data.shape[0]
    """
    # [1] to select indices and ignore distances
    # [:,1:] to discard self-neighbor
    return kdtree.query(kdtree.data, k=k + 1, n_jobs=-1)[1][:, 1:]
1e377c8a1444858b9fc5e293a8300fd20e0dea8a
696,691
import inspect


def geometry_package(top: float, bottom: float, left: float, right: float) -> str:
    """
    Generate latex code to add geometry package

    :param top: top margin
    :param bottom: bottom margin
    :param left: left margin
    :param right: right margin
    """
    return inspect.cleandoc(rf"""
        \usepackage[left = {left}cm, right = {right}cm, top = {top}cm, bottom = {bottom}cm]{{geometry}}
        """)
676f5490ec599fcd10075365f0f90db84bd2896b
696,695
def sum_lists_product(list1, list2):
    """
    Return the sum of multiplying corresponding list elements

    :param list1: A list
    :param list2: A list
    :return: A number representing the sum
    """
    lst_sum = sum([x * y for x, y in zip(list1, list2)])
    return lst_sum
8c27c510458c7591b2a8ca3ccdd7e3f54041f171
696,698
def _get_rest_endpoint_base_url(rest_service_base_url: str) -> str:
    """
    Return the base URL of the endpoints of BAM's REST service.

    :param rest_service_base_url: Base URL of BAM's REST service.
    :type rest_service_base_url: str
    :return: The base URL of the endpoints of BAM's REST service.
    :rtype: str
    """
    return (
        rest_service_base_url
        + "v1"
    )
d947d242a63203f0007433be383ed74cb4289ff4
696,703
def get_feature_importance_by_model(model):
    """
    Returns the features importance of a model

    :param model: the classifier
    :return: The list of feature importance
    """
    return model.feature_importances_
b9dff896024f3a006862254289e9ee81b901e8a9
696,705
from typing import Tuple


def parse_sender(line: str) -> Tuple[str, str]:
    """
    Extracts the sender of a line (the timestamp should already be removed)

    :param line: Line to parse.
    :return: a tuple with the sender (if any) and the rest of the line.
    """
    if ':' not in line:
        return "", line  # It's a system message
    sender, rest_of_line = line.split(':', 1)
    rest_of_line = rest_of_line[1:]  # remove leading space
    return sender, rest_of_line
637af2f7a73d1cd953a26b8cbd52eab5f3487fe3
696,707
import random
import string


def random_tag(k=8):
    """ Returns a random tag for disambiguating filenames """
    return "".join(random.choice(string.ascii_uppercase + string.digits)
                   for _ in range(k))
00ae606478b10a78bacb8cfe673554aebc738a89
696,709
def create_metrics_extension_conf(az_resource_id, aad_url): """ Create the metrics extension config :param az_resource_id: Azure Resource ID for the VM :param aad_url: AAD auth url for the VM """ conf_json = '''{ "timeToTerminateInMs": 4000, "configurationExpirationPeriodInMinutes": 1440, "configurationQueriesFrequencyInSec": 900, "configurationQueriesTimeoutInSec": 30, "maxAcceptedMetricAgeInSec": 1200, "maxDataEtwDelayInSec": 3, "maxPublicationAttemptsPerMinute": 5, "maxPublicationBytesPerMinute": 10000000, "maxPublicationMetricsPerMinute": 500000, "maxPublicationPackageSizeInBytes": 2500000, "maxRandomPublicationDelayInSec": 25, "metricsSerializationVersion": 4, "minGapBetweenPublicationAttemptsInSec": 5, "publicationTimeoutInSec": 30, "staleMonitoringAccountsPeriodInMinutes": 20, "internalMetricPublicationTimeoutInMinutes": 20, "dnsResolutionPeriodInSec": 180, "maxAggregationQueueSize": 500000, "initialAccountConfigurationLoadWaitPeriodInSec": 20, "etwMinBuffersPerCore": 2, "etwMaxBuffersPerCore": 16, "etwBufferSizeInKb": 1024, "internalQueueSizeManagementPeriodInSec": 900, "etwLateHeartbeatAllowedCycleCount": 24, "etwSampleRatio": 0, "maxAcceptedMetricFutureAgeInSec": 1200, "aggregatedMetricDiagnosticTracePeriod": 900, "aggregatedMetricDiagnosticTraceMaxSize": 100, "enableMetricMetadataPublication": true, "enableDimensionTrimming": true, "shutdownRequestedThreshold": 5, "internalMetricProductionLevel": 0, "maxPublicationWithoutResponseTimeoutInSec": 300, "maxConfigQueryWithoutResponseTimeoutInSec": 300, "maxThumbprintsPerAccountToLoad": 100, "maxPacketsToCaptureLocally": 0, "maxNumberOfRawEventsPerCycle": 1000000, "publicationSimulated": false, "maxAggregationTimeoutPerCycleInSec": 20, "maxRawEventInputQueueSize": 2000000, "publicationIntervalInSec": 60, "interningSwapPeriodInMin": 240, "interningClearPeriodInMin": 5, "enableParallelization": true, "enableDimensionSortingOnIngestion": true, "rawEtwEventProcessingParallelizationFactor": 1, "maxRandomConfigurationLoadingDelayInSec": 120, "aggregationProcessingParallelizationFactor": 1, "aggregationProcessingPerPartitionPeriodInSec": 20, "aggregationProcessingParallelizationVolumeThreshold": 500000, "useSharedHttpClients": true, "loadFromConfigurationCache": true, "restartByDateTimeUtc": "0001-01-01T00:00:00", "restartStableIdTarget": "", "enableIpV6": false, "disableCustomMetricAgeSupport": false, "globalPublicationCertificateThumbprint": "", "maxHllSerializationVersion": 2, "enableNodeOwnerMode": false, "performAdditionalAzureHostIpV6Checks": false, "compressMetricData": false, "publishMinMaxByDefault": true, "azureResourceId": "'''+ az_resource_id +'''", "aadAuthority": "'''+ aad_url +'''", "aadTokenEnvVariable": "MSIAuthToken" } ''' return conf_json
08c1bd23fc021515664e8476307264179c0c9652
696,711
import pickle


def load_pickle(file_path):
    """
    Unpickle some data from a given path.

    :param file_path: Target file path.
    :return: data: The python object that was serialized and stored in disk.
    """
    pkl_file = open(file_path, 'rb')
    data = pickle.load(pkl_file)
    pkl_file.close()
    return data
2aa2d2fcdd408cedfce43c9cea05065904bd2c98
696,712
import itertools


def _sorted_kwargs_list(kwargs):
    """
    Returns a unique and deterministic ordered list from the given kwargs.
    """
    sorted_kwargs = sorted(kwargs.items())
    sorted_kwargs_list = list(itertools.chain(*sorted_kwargs))
    return sorted_kwargs_list
e442044bf6ff1ec0d5308cd200272e4a8993174f
696,713
def dpd(td, t, roundit=True):
    """
    This function calculates a dew point depression scalar or array from a scalar or array
    of temperature and dew point temperature and returns it.

    :param td: dew point temperature in degrees C (array or scalar)
    :param t: dry bulb temperature in degrees C (array or scalar)
    :param roundit: flag to tell function to round to one decimal place, default TRUE
    :type td: float
    :type t: float
    :type roundit: boolean
    :return: dew point depression in degrees C (array or scalar)
    :rtype: float

    Inputs:
        td = dew point temperature in degrees C (array or scalar)
        t = dry bulb temperature in degrees C (array or scalar)

    Outputs:
        dp = dew point depression in degrees C (array or scalar)

    Ref: TESTED!
        dpd = dpd(10., 15.)
        dpd = 5.0
    """
    if td is None or t is None:
        return None

    dp = t - td
    if roundit:
        dp = round(dp * 10.) / 10.

    return dp
b4505fa9ec6ee3fc5ec07788e35ddc494ae839bb
696,714
def format_parties(parties):
    """
    Return the list of parties from the case title.

    :param parties: string containing the parties name
    :type parties: str
    :return: list of names
    :rtype: [str]
    """
    if parties.startswith('CASE OF '):
        parties = parties[len('CASE OF '):]
    if parties[-1] == ')':
        parties = parties.split('(')[0]
    parties = parties.split(' v. ')
    parties = [p.strip() for p in parties]
    return parties
b0a9040d2c8a5b69647f92550c590192470d5692
696,716
import csv


def read_csv_fieldnames(filename, separator, quote):
    """
    Inputs:
      filename  - name of CSV file
      separator - character that separates fields
      quote     - character used to optionally quote fields
    Output:
      A list of strings corresponding to the field names in
      the given CSV file.
    """
    # function assumes the first row of the CSV file contains the field names
    csvtable = []
    with open(filename, "rt", newline='') as csvfile:
        # open reader with delimiter and quotechar options to set separator and quote
        csvreader = csv.reader(csvfile, delimiter=separator, quotechar=quote)
        # easiest way to access a row
        for row in csvreader:
            csvtable.append(row)
            # break because only the first row is needed
            break
    # instead of returning csvtable (a list of lists), return only the first row as a list
    lst = csvtable[0]
    return lst
ead8b5ff5ca11d47771cf793309a640e2b1410a1
696,717
def resolve_negative_axis(ndims, axis): """ Resolve all negative `axis` indices according to `ndims` into positive. Usage:: resolve_negative_axis(4, [0, -1, -2]) # output: (0, 3, 2) Args: ndims (int): Number of total dimensions. axis (Iterable[int]): The axis indices. Returns: tuple[int]: The resolved positive axis indices. Raises: ValueError: If any index in `axis` is out of range. """ axis = tuple(int(a) for a in axis) ret = [] for a in axis: if a < 0: a += ndims if a < 0 or a >= ndims: raise ValueError('`axis` out of range: {} vs ndims {}.'. format(axis, ndims)) ret.append(a) if len(set(ret)) != len(ret): raise ValueError('`axis` has duplicated elements after resolving ' 'negative axis: ndims {}, axis {}.'. format(ndims, axis)) return tuple(ret)
7f43943e20d66d6e9de8ea750fd1c2eb16764e69
696,719
def preprocess(train): """ This method replaces punctuations and newline characters with space character given a string. Then it splits all words into a list and returns it. Parameters: train -- The string that will be preprocessed Returns: train -- A list containing words """ train = train.replace('\n', ' ') punctuations = [",", ".", ":", "\"", "'", "/", "\\", "*", "=", "-", "_", ")", "(", "[", "]", "{", "}", "%", "+", "!", "@", "#", "$", "^", "&", "+", "|", ";", "<", ">", "?", "`", "~", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] for punctuation in punctuations: train = train.replace(punctuation, " ") train = train.split() return train
544befcc0e74d43261679aa634cc4d677f1bada2
696,720
def number_of_lines(filename=""):
    """
    Function that returns the number of lines of a text file

    Args:
        filename (str): The name of the file

    Returns:
        The number of lines of the file
    """
    cnt = 0
    with open(filename, 'r', encoding='utf8') as f:
        for line in f:
            cnt += 1
    return cnt
47539fc0280251f0334da9ce2ab2973d9f0cc63e
696,721
def to_bytes_literal(seq):
    """Prints a byte sequence as a Python bytes literal that only uses hex encoding."""
    return 'b"' + "".join("\\x{:02x}".format(v) for v in seq) + '"'
dadbc38fd86daf2b6dd618eee6c792bc2044d09c
696,724
def filter_cellular_barcodes_manual(matrix, cell_barcodes):
    """ Take all barcodes that were given as cell barcodes """
    barcodes = list(set(matrix.bcs) & set(cell_barcodes))

    metrics = {
        'filtered_bcs': len(barcodes),
        'filtered_bcs_lb': len(barcodes),
        'filtered_bcs_ub': len(barcodes),
        'max_filtered_bcs': 0,
        'filtered_bcs_var': 0,
        'filtered_bcs_cv': 0,
    }

    return barcodes, metrics, None
58a07034139fac1d189303203d6e849d5637e3fe
696,726
def line(etok):
    """String giving the starting line number of Etok."""
    raw = etok.raw
    if raw:
        return f'line={raw[0].lineno}.'
    return ''
5b1160054db8b90fd27cc7557e1cee17b9f8f8e3
696,727
def isinstance_safe(value, type_):
    """Determine if value is an instance of type_

    Will work even if it is not a valid question to ask of
    the given value.
    """
    try:
        return isinstance(value, type_)
    except TypeError:
        # Cannot perform isinstance on some types
        return False
57223307123efb564fe1cfd4089a4e056254c258
696,729
import collections def collapseExpansions(expansions, numDims): """Scans through the given list of expansions (each assumed to pertain to a single 3D image), and combines any which cover the same image area, and cover adjacent volumes. :args expansions: A list of expansion slices - see :func:`calcExpansions`. :args numDims: Number of dimensions covered by each expansion, not including the volume dimension (i.e. 3 for a 4D image). :returns: A list of expansions, with equivalent expansions that cover adjacent images collapsed down. .. note:: For one expansion ``exp`` in the ``expansions`` list, this function assumes that the range at ``exp[numDims]`` contains the image to which ``exp`` pertains (i.e. ``exp[numDims] == (vol, vol + 1)``). """ if len(expansions) == 0: return [] commonExpansions = collections.OrderedDict() expansions = sorted(expansions) for exp in expansions: vol = exp[numDims][0] exp = tuple(exp[:numDims]) commonExps = commonExpansions.get(exp, None) if commonExps is None: commonExps = [] commonExpansions[exp] = commonExps for i, (vlo, vhi) in enumerate(commonExps): if vol >= vlo and vol < vhi: break elif vol == vlo - 1: commonExps[i] = vol, vhi break elif vol == vhi: commonExps[i] = vlo, vol + 1 break else: commonExps.append((vol, vol + 1)) collapsed = [] for exp, volRanges in commonExpansions.items(): for vlo, vhi in volRanges: newExp = list(exp) + [(vlo, vhi)] collapsed.append(newExp) return collapsed
073923a29d065ee21e26ef234135b4d358ecd288
696,730
def div(n1, n2):
    """
    Divides n1 by n2

    >>> div(9, 3)
    3.0
    """
    return n1 / n2
19419ea749c640fd7fb895e45bd2e06ad29bde32
696,732
def bounds(a):
    """Return a list of slices corresponding to the array bounds."""
    return tuple([slice(0, a.shape[i]) for i in range(a.ndim)])
fa9c7c8ed51d5c10ecd4392a72c6efe379a9407b
696,738
def move1(state, b1, dest):
    """
    Generate subtasks to get b1 and put it at dest.
    """
    return [('get', b1), ('put', b1, dest)]
c84a2d8246017fa94a73dd2e3408f7f05cf3573d
696,739
def exists(dict, key):
    """ Check if a key exists in a dict """
    return key in dict.keys()
3dd531b8a13af2e8905f721e05756bd8184e24c4
696,741
def allowed_file(filename):
    """
    Returns true if file is a csv

    :param filename: File name input as a str
    :return: Boolean
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'csv'
80a467c6cfc2372a212d797209cbd4979d5d2c96
696,743
from typing import List
from typing import Union
from typing import Set


def get_reciprocal_rank(retrieved_docs: List, relevant_docs: Union[Set, List]) -> float:
    """
    The mean reciprocal rank is a statistic measure for evaluating any process that produces
    a list of possible responses to a sample of queries, ordered by probability of correctness.

    rank_i: The reciprocal rank of a query response is the multiplicative inverse of the rank
    of the first correct answer.

    :param retrieved_docs: List of queries and their retrieved documents (from evaluated system)
    :param relevant_docs: List of queries and their relevant documents (from gold standard)
    :return:
    """
    for rank, retrieved_doc in enumerate(retrieved_docs, start=1):
        if retrieved_doc in relevant_docs:
            return 1. / rank
    return 0.
f2c79f95a63489fc3e512a16c11ac77861cdd8ad
696,750
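A short hypothetical example of the reciprocal-rank computation, assuming the get_reciprocal_rank function above:

get_reciprocal_rank(['doc3', 'doc1', 'doc2'], {'doc2'})  # -> 1/3, first relevant document appears at rank 3
get_reciprocal_rank(['doc3', 'doc1', 'doc2'], {'doc5'})  # -> 0.0, no relevant document was retrieved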
import random


def fake_int_id(nmax=2 ** 31 - 1):
    """ Create a random int id """
    return random.randint(0, nmax)
0327db253e247e2c4229751741dd3296651f9fa6
696,752
def unfold(f):
    """Return function to unfold value into stream using
    passed function as values producer. Passed function should
    accept current cursor and should return:

      * tuple of two elements (value, cursor), value will be added
        to output, cursor will be used for next function call
      * None in order to stop producing sequence

    Usage:
    >>> doubler = unfold(lambda x: (x*2, x*2))
    >>> list(islice(doubler(10), 0, 10))
    [20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240]
    """
    def _unfolder(start):
        value, curr = None, start
        while 1:
            step = f(curr)
            if step is None:
                break
            value, curr = step
            yield value
    return _unfolder
1c2a0caec09d439c5e55c50f09eff78b376363a9
696,753
def format_time(t):
    """Return a formatted time string 'HH:MM:SS' based on a numeric time() value"""
    m, s = divmod(t, 60)
    h, m = divmod(m, 60)
    return f'{h:0>2.0f}:{m:0>2.0f}:{s:0>2.0f}'
b5488fa3088195252e27da57fbe56037fad98591
696,754
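A quick illustrative check of format_time with hypothetical inputs (assumes the function above):

format_time(3661)   # -> '01:01:01'  (1 hour, 1 minute, 1 second)
format_time(45296)  # -> '12:34:56'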
import math


def entropy(string):
    """Calculates the Shannon entropy of a string"""
    prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))]
    entropy = -sum([p * math.log(p) / math.log(2.0) for p in prob])
    return entropy
410e5935740a10df26936ee11c714dfb2b102845
696,759
def get_std_u(u):
    """Regression coefficients a_std_u, b_std_u, c_std_u for the efficiency of the
    hot water supply section of the backup boiler

    Args:
        u(str): subscript denoting the end use (k, s, w, b1, b2, ba1, ba2)

    Returns:
        tuple: regression coefficients a_std_u, b_std_u, c_std_u for the efficiency
        of the hot water supply section of the backup boiler
    """
    # Table C.3: regression coefficients a_std_u, b_std_u, c_std_u for the efficiency
    # of the hot water supply section of the backup boiler
    table_d_3 = [
        (0.0019, 0.0006, 0.0019, 0.0000, 0.0000, 0.0000, 0.0033),
        (0.0013, 0.0005, 0.0013, 0.0002, -0.0005, 0.0002, 0.0194),
        (0.6533, 0.7414, 0.6533, 0.7839, 0.7828, 0.7839, 0.5776)
    ]
    i = {'k': 0, 's': 1, 'w': 2, 'b1': 3, 'b2': 4, 'ba1': 5, 'ba2': 6}[u]
    return table_d_3[0][i], table_d_3[1][i], table_d_3[2][i]
7bbf32495baf9bd07d14b8fc73c73de167031031
696,760
def make_word_groups(vocab_words: list) -> str:
    """
    This function takes a `vocab_words` list and returns a string
    with the prefix and the words with prefix applied, separated by ' :: '.

    Args:
        vocab_words (list): list of vocabulary words with a prefix.

    Returns:
        str: str of prefix followed by vocabulary words with
             prefix applied, separated by ' :: '.
    """
    prefix = vocab_words[0]
    output = prefix
    for word in vocab_words[1:]:
        output += " :: " + prefix + word
    return output
878fc8100c6e2a455540773756bcbbde23a38224
696,766
def same_prefix(cebuano_word, word):
    """
    Checks whether two words share the same prefix (length 2 or 3).
    If the first letters are vowels, they are considered equivalent.
    """
    if cebuano_word and word:
        if cebuano_word[0] in "aeiou" and word[0] in "eaiou":
            return cebuano_word[1:2] == word[1:2]
        else:
            return cebuano_word[0:2] == word[0:2]
    else:
        return False
0b3b8951fd82cc31ab62a9a72ba02b4b52404b77
696,768
def stop_word_removal(text_all, cached_stop_words):
    """ Returns text with removed stop words

    Keyword arguments:
    text_all -- list of all texts (list of str)
    cached_stop_words -- list of all stopwords (list of str)
    """
    new_text_all = []
    for text in text_all:
        text1 = ' '.join([word for word in text.split() if word not in cached_stop_words])
        new_text_all.append(text1)
    return new_text_all
543e78edb078778d9acb2fd26be90c267a5b6450
696,769
import time


def convertUTCtoUnix(data):
    """Convert UTC time format to UNIX time format"""
    return time.mktime(data.timetuple())
d27096baa842821a904b9338dd829e337678a5bc
696,774
def get_short_labeler(prefix):
    """
    Returns a labeling function that prepends `prefix` to an assignment index.
    """
    def labeler(index):
        return f"{prefix} {index:02d}"
    return labeler
a0037b8bb8e398efd8726309b914591ed6c6d75b
696,776
def ucFirst(s):
    """ Returns a copy of the string with the first letter uppercased """
    return s[0].upper() + s[1:]
28b6d593f5c2a17ff2a6fc0bb4ae3280b93f275b
696,777
import requests


def served_by_django(url):
    """Return True if url returns 200 and is served by Django. (NOT Nginx)"""
    r = requests.get(url, allow_redirects=False)
    status = (r.status_code == 200)
    django = ('x-served' not in r.headers or
              r.headers['x-served'] == 'nginx-via-django')
    return all([status, django])
e4d8991e91389ff4dbe588fd483c8bf6a1fe93ad
696,778
def get_python_idx(i):
    """Return i-1 for python indexing of array, list etc"""
    if isinstance(i, list):
        new_list = []
        for el in i:
            new_list.append(el - 1)
        return new_list
    else:
        return i - 1
6d68287509b2c5f3554655f10218005f873ff806
696,780
import zlib


def adler32_chunk(chunk):
    """ Returns weak adler32 checksum for chunk """
    return zlib.adler32(chunk)
d9828965892d2b37d9a7f0b41bf48e601beef9da
696,782
def check_parameters(column_to_update, columns_to_query_lst, query_values_dict_lst):
    """
    check_parameters checks whether the passed parameters are valid or not.

    :param column_to_update: name of column that is to be updated.
    :param columns_to_query_lst: list of column names that is used in where clause.
    :param query_values_dict_lst: list of dictionaries containing values for where clause and target column.
    :return: boolean
    """
    # check if dimensions are correct.
    expected_length = 1 + len(columns_to_query_lst)
    all_columns_name = ["update"] + columns_to_query_lst

    for dict_val in query_values_dict_lst:
        # check dimensions.
        if len(dict_val) != expected_length:
            print(("%s doesn't match the dimensions" % (dict_val)))
            return False
        # check columns present.
        for column in all_columns_name:
            if column not in dict_val:
                print(("%s column isn't present in dictionary" % (column)))
                return False

    return True
dc08667b78cbdbf98d5e692ca7a9f039bbd390a9
696,783
def _parse_endpoint_url(urlish):
    """
    If given a URL, return the URL and None. If given a URL with a string and
    "::" prepended to it, return the URL and the prepended string. This is
    meant to give one a means to supply a region name via arguments and
    variables that normally only accept URLs.
    """
    if '::' in urlish:
        region, url = urlish.split('::', 1)
    else:
        region = None
        url = urlish
    return url, region
bf3defcf9aeaca43d8aa8d7ba645cd0ba11b99f6
696,785
def is_reserved_name(name):
    """Tests if name is reserved

    Names beginning with 'xml' are reserved for future standardization"""
    if name:
        return name[:3].lower() == 'xml'
    else:
        return False
29ca0ec73b18259126a61aaf335a7d0946b72eb6
696,787
def find_alphabetically_last_word(text: str) -> str: """ Given a string |text|, return the word in |text| that comes last lexicographically (i.e. the word that would come last when sorting). A word is defined by a maximal sequence of characters without whitespaces. You might find max() handy here. If the input text is an empty string, it is acceptable to either return an empty string or throw an error. """ # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this) return max(text.split()) # END_YOUR_CODE
c5b6daca6ae60cabc36e88afa02bac2950c01763
696,794
import itertools def chain(*readers): """ Use the input data readers to create a chained data reader. The new created reader chains the outputs of input readers together as its output, and it do not change the format of the outputs. **Note**: ``paddle.reader.chain`` is the alias of ``paddle.fluid.io.chain``, and ``paddle.fluid.io.chain`` is recommended to use. For example, if three input readers' outputs are as follows: [0, 0, 0], [10, 10, 10], [20, 20, 20]. The chained reader will output: [0, 0, 0], [10, 10, 10], [20, 20, 20]. Args: readers(list): input data readers. Returns: callable: the new chained data reader. Examples: .. code-block:: python import paddle def reader_creator_3(start): def reader(): for i in range(start, start + 3): yield [i, i, i] return reader c = paddle.reader.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20)) for e in c(): print(e) # Output: # [0, 0, 0] # [1, 1, 1] # [2, 2, 2] # [10, 10, 10] # [11, 11, 11] # [12, 12, 12] # [20, 20, 20] # [21, 21, 21] # [22, 22, 22] """ def reader(): rs = [] for r in readers: rs.append(r()) for e in itertools.chain(*rs): yield e return reader
e2871dd057540463353ed2bf270ef5d8a52aa0e0
696,796
import binascii


def hex_xformat_encode(v: bytes) -> str:
    """
    Encode its input in ``X'{hex}'`` format.

    Example:

    .. code-block:: python

        hex_xformat_encode(b"hello") == "X'68656c6c6f'"
    """
    return "X'{}'".format(binascii.hexlify(v).decode("ascii"))
19829e00ea198489a9ecdf926532cbc94432f2b0
696,798
def methodArgs(item):
    """Returns a dictionary formatted as a string given the arguments in item.

    Args:
        item: dictionary containing key 'args' mapping to a list of strings

    Returns:
        dictionary formatted as a string, suitable for printing as a value
    """
    args = ["'%s': %s" % (arg, arg) for arg in item['args']]
    return '{%s}' % ', '.join(args)
3a0623105359c0a202390b777fb60372ae3af35c
696,800
# Assumes the third-party `latexcodec` package, which registers the 'latex' codec on import.
import latexcodec  # noqa: F401


def latex_decode(text):
    """
    Decode ascii text latex format to UTF-8
    """
    return text.encode('ascii').decode('latex')
d40b0b6e86ffabcd5685bdcd06d8546c68a523a6
696,806
def selection_sort_v2(li):
    """
    [list of int] => [list of int]

    Same as selection_sort except it takes advantage of min() function.
    """
    sorted_list = li
    # iterate as many times as the list is long
    for i in range(len(sorted_list)):
        # find the minimum in the unsorted list
        minimum = min(sorted_list[i:])
        # locate the index of the minimum, searching only the unsorted part
        # (searching from the start would pick up equal values already placed)
        min_index = sorted_list.index(minimum, i)
        # swap the minimum and start of unsorted list
        sorted_list[i], sorted_list[min_index] = sorted_list[min_index], sorted_list[i]
    return sorted_list
a7b08b2018a1ad0c249cfd223f6250fed9884606
696,808
def decode_bytes(obj):
    """If the argument is bytes, decode it.

    :param Object obj: A string or byte object
    :return: A string representation of obj
    :rtype: str
    """
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    elif isinstance(obj, str):
        return obj
    else:
        raise ValueError("ERROR: {} is not bytes or a string.".format(obj))
48c56e899cc83deb478cc665b3f051e1e99a18ae
696,809
def compute_DL_da_i(coeff_basis_sum, bases, time_index, i): """ | Ref. Paper [LUDW2011]_ eq. [80] | Compute derivative of the attitude deviation wrt attitude params. See :meth:`compute_coeff_basis_sum` :param coeff_basis_sum: the sum :math:`\\sum_{n=L-M+1}^{L} a_n B_n(t_L)` :param bases: Bspline basis, B_n(t_L) in the equation above. :param time_index: [int] index that will get us to return B_n(t_L). Since we stored only B_n for all the observed times t_L, it is possible to access them only with the index :param i: number of the base that we want (**n in the equations above**) """ dDL_da_i = -2 * coeff_basis_sum * bases[i, time_index] return dDL_da_i.reshape(4, 1)
e40139e8563e9f8457e1f8ed05dc4c66590ceb4e
696,812
def proj_permission_level(project, profile): """Given a project and profile return their permission level Args: project: a core.models.Project object profile: a core.models.Profile object Returns: permission_level: (int) 0: no permissions 1: coder 2: admin 3: creator """ if project.creator == profile: return 3 elif any(perm.profile == profile and perm.permission == 'ADMIN' for perm in project.projectpermissions_set.all()): return 2 elif any(perm.profile == profile and perm.permission == 'CODER' for perm in project.projectpermissions_set.all()): return 1 else: return 0
f11b3299cdc3087669811d64d457ad158160be90
696,813
def rgb_to_ansi16(r, g, b, use_bright=False):
    """ Convert RGB to ANSI 16 color """
    ansi_b = round(b / 255.0) << 2
    ansi_g = round(g / 255.0) << 1
    ansi_r = round(r / 255.0)
    ansi = (90 if use_bright else 30) + (ansi_b | ansi_g | ansi_r)

    return ansi
084e1d4eea8b792a43f5e32c4a8f441e48b509be
696,814
def ThumbURL(image, viz_params=None):
    """Create a target url for a thumbnail of an image."""
    if viz_params:
        url = image.getThumbURL(viz_params)
    else:
        url = image.getThumbURL()
    return url
44d9e7eccede27a0e9a69798169f3a432198ba8b
696,816
def is_none(val):
    """Check for none as string"""
    return val in [None, 'none', 'None']
204e4cd64687bb0ae1063a060b45b497a8b1ce35
696,819
def KeyWithHighestValue(d, forbidden_keys=[]):
    """Return the key with the highest value.

    Optionally, a list of forbidden keys can be provided. If so, the function
    will return the key with the next-highest value, but which is not forbidden.
    """
    mv = -1
    mk = None
    for k, v in d.items():
        if k in forbidden_keys:
            continue
        if v > mv:
            mk = k
            mv = v
    return mk
04db19f0047e35a12415471c9ad568a13f38cfe3
696,823
import gettext


def get_i18n_content_by_lang(fmt, local, lang, **kw):
    """
    Get another language string according to key string.

    reference
    - `Common Message Property <https://docs.python.org/2/library/gettext.html>`_

    :param fmt: Multilingual key string. like _('This is a translatable string.')
    :param local: Domain corresponding to "fmt".
    :param lang: Language. ['en'|'ko'|'ja']
    :param kw: Named variable parameter list.
    :return: a string.
    """
    local_text = gettext.translation(local, 'locales', [lang])
    if len(kw) > 0:
        content = local_text.gettext(fmt).format(**kw)
    else:
        content = local_text.gettext(fmt)
    return content
8eaba045a34e0b0493230850586995e863210319
696,829
def is_all_dict(alist):
    """Check if every element of a list are dicts"""
    return all([isinstance(l, dict) for l in alist])
2de5334c16876fc0995b569cbb04e980b57546c0
696,830
def _get_wind_direction(wind_direction_degree: float) -> str: """Convert wind direction degree to named direction.""" if 11.25 <= wind_direction_degree < 33.75: return "NNE" if 33.75 <= wind_direction_degree < 56.25: return "NE" if 56.25 <= wind_direction_degree < 78.75: return "ENE" if 78.75 <= wind_direction_degree < 101.25: return "E" if 101.25 <= wind_direction_degree < 123.75: return "ESE" if 123.75 <= wind_direction_degree < 146.25: return "SE" if 146.25 <= wind_direction_degree < 168.75: return "SSE" if 168.75 <= wind_direction_degree < 191.25: return "S" if 191.25 <= wind_direction_degree < 213.75: return "SSW" if 213.75 <= wind_direction_degree < 236.25: return "SW" if 236.25 <= wind_direction_degree < 258.75: return "WSW" if 258.75 <= wind_direction_degree < 281.25: return "W" if 281.25 <= wind_direction_degree < 303.75: return "WNW" if 303.75 <= wind_direction_degree < 326.25: return "NW" if 326.25 <= wind_direction_degree < 348.75: return "NNW" return "N"
b48af8b8407906a1ab84d8e0a62da76ad2519aa9
696,831
import gzip


def _open_config_file(filename, mode="r"):
    """Open a file respecting .gz file endings."""
    if filename[-3:] == ".gz":
        # gzip.open() only accepts an encoding in text mode, so force 'rt'/'wt'.
        if "t" not in mode and "b" not in mode:
            mode += "t"
        return gzip.open(filename, mode, encoding="UTF-8")
    else:
        return open(filename, mode, encoding="UTF-8")
f47eb8f9500ea0e7939387ffab8b854be4c2ba6a
696,833
def _get_oncotreelink(syn, databasetosynid_mappingdf, oncotree_link=None):
    """
    Gets oncotree link unless a link is specified by the user

    Args:
        syn: Synapse object
        databasetosynid_mappingdf: database to synid mapping
        oncotree_link: link to oncotree. Default is None

    Returns:
        oncotree link
    """
    if oncotree_link is None:
        oncolink = databasetosynid_mappingdf.query(
            'Database == "oncotreeLink"').Id
        oncolink_ent = syn.get(oncolink.iloc[0])
        oncotree_link = oncolink_ent.externalURL
    return oncotree_link
21bbb6bdc7eca3d8996b85acae0f822c8ddde587
696,834
def split_test_train_tadpole(df_train_test, df_eval, random_seed=0): """ Split dataframes into three parts: train, test & evaluation These are the sets as used in challenge evaluation for the paper Marinescu et al, 2020, ArXiv Train: patients (RIDs) from D1,D2 ADNI Data sets Test: roll-over patients (RIDs) from D1,D2 ADNI Data sets that are in D4 Eval: D4 ADNI Data set """ # get only subject IDs with at least 2 rows per subject (required for test/eval set) ids = df_train_test.groupby('RID').filter(lambda x: len(x) > 1)['RID'].unique() train_df = df_train_test[df_train_test['RID'].isin(ids)] # select all records where RID is in d4. test_df = df_train_test[ df_train_test['RID'].isin(df_eval['RID'].unique()) ] eval_df = df_eval return train_df, test_df, eval_df
fa922e50ec0e3b7121e46c3fd5b36ac3693785d7
696,838
def listOfTuplesToList(listOfTuples):
    """Convert a list of tuples into a simple list of tuple[0] items."""
    res = []
    for item in listOfTuples:
        res.append(item[0])
    return res
aec287a830a75fc9a0f8ba1571e37ef2c846d9b6
696,839
def minDictionaries(d1, d2):
    """
    Return the minimum of two dictionaries. Assumes they share the same keys.
    """
    assert set(d1.keys()) == set(d2.keys())
    return dict((key, min(d1[key], d2[key])) for key in d1)
efa9bec3c4ae8e2e8e6c7387eda9e019ba7a0949
696,842
def _minify_promql(query: str) -> str:
    """
    Given a PromQL query, return the same query with most whitespace collapsed.

    This is useful for allowing us to nicely format queries in code, but
    minimize the size of our queries when they're actually sent to Prometheus
    by the adapter.
    """
    trimmed_query = []

    # while we could potentially do some regex magic, we want to ensure
    # that we don't mess up any labels (even though they really shouldn't
    # have any whitespace in them in the first place) - thus we just
    # strip any leading/trailing whitespace and leave everything else alone
    for line in query.split("\n"):
        trimmed_query.append(line.strip())

    return (" ".join(trimmed_query)).strip()
de64a59b2db642e6b799f05fe5aa77749ea89b5d
696,845
def map_indexed(f, coll):
    """
    Returns a generator consisting of the result of applying ``f`` to ``0``
    and the first item of ``coll``, followed by applying ``f`` to ``1`` and
    the second item in ``coll``, etc, until ``coll`` is exhausted. Thus
    function ``f`` should accept 2 arguments, ``index`` and ``item``.
    """
    return map(lambda pair: f(pair[0], pair[1]), enumerate(coll))
81c1a7511fb912f081021836e82afa1d2ddcd166
696,849
from typing import List


def get_all_words_in_text(text: str) -> List[str]:
    """Get all the words in a given text in the order that they appear.

    :param text: the text to get all the word from
    :return: a list of words in the order that they appear in text
    """
    # the `split` and `strip` methods handle all kinds of white space,
    # including newlines
    return text.strip().split()
4e1df302282eeed63dab2a7e9e5ffbb677b44427
696,852
import itertools


def peek_at(iterable):
    """Returns the first value from iterable, as well as a new iterable
    with the same content as the original iterable
    """
    gen = iter(iterable)
    peek = next(gen)
    return peek, itertools.chain([peek], gen)
368e7f341e00f66b24a7b5b34fb45863fa1c6203
696,853
def get_mutations(aln_df):
    """Get a list of residue numbers (in the original sequence's numbering) that are mutated

    Args:
        aln_df (DataFrame): Alignment DataFrame

    Returns:
        list: Residue mutations as tuples of (original_residue, resnum, mutated_residue)
    """
    mutation_df = aln_df[aln_df['type'] == 'mutation']

    tuples = []
    if not mutation_df.empty:
        subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']]
        subset['id_a_pos'] = subset['id_a_pos'].astype(int)
        tuples = [tuple(x) for x in subset.values]

    return tuples
fac2f33cbbfedfe41137e212ef4e53e58b8684e2
696,855
def _is_bool(s: str) -> bool:
    """Check a value is a text bool."""
    if s.lower() in ['true', 'false']:
        return True
    else:
        return False
62456de1e213157f8fe0a112abb7c6eaf1a59070
696,857