Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M).
import numpy as np


def _rstrip_inplace(array):
    """
    Performs an in-place rstrip operation on string arrays. This is
    necessary since the built-in `np.char.rstrip` in Numpy does not
    perform an in-place calculation.
    """
    # The following implementation converts the string to unsigned integers
    # of the right length. Trailing spaces (which are represented as 32) are
    # then converted to null characters (represented as zeros). To avoid
    # creating large temporary mask arrays, we loop over chunks (attempting
    # to do that on a 1-D version of the array; large memory may still be
    # needed in the unlikely case that a string array has small first
    # dimension and cannot be represented as a contiguous 1-D array in
    # memory).

    dt = array.dtype

    if dt.kind not in 'SU':
        raise TypeError("This function can only be used on string arrays")
    # View the array as appropriate integers. The last dimension will
    # equal the number of characters in each string.
    bpc = 1 if dt.kind == 'S' else 4
    dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
    b = array.view(dt_int, np.ndarray)
    # For optimal speed, work in chunks of the internal ufunc buffer size.
    bufsize = np.getbufsize()
    # Attempt to have the strings as a 1-D array to give the chunk known size.
    # Note: the code will work if this fails; the chunks will just be larger.
    if b.ndim > 2:
        try:
            b.shape = -1, b.shape[-1]
        except AttributeError:  # can occur for non-contiguous arrays
            pass
    for j in range(0, b.shape[0], bufsize):
        c = b[j:j + bufsize]
        # Mask which will tell whether we're in a sequence of trailing spaces.
        mask = np.ones(c.shape[:-1], dtype=bool)
        # Loop over the characters in the strings, in reverse order. We process
        # the i-th character of all strings in the chunk at the same time. If
        # the character is 32, this corresponds to a space, and we then change
        # this to 0. We then construct a new mask to find rows where the
        # i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
        for i in range(-1, -c.shape[-1], -1):
            mask &= c[..., i] == 32
            c[..., i][mask] = 0
            mask = c[..., i] == 0
    return array
f59d6d127d4d3f0725df5eee2e4586ccbea9288b
3,653,937
def compute_fixpoint_0(graph, max_value):
    """
    Computes the fixpoint obtained by the symbolic version of the backward
    algorithm for safety games. Starts from the antichain of the safe set
    and works backwards using controllable predecessors. The maximum value
    for the counters is a parameter to facilitate the incremental algorithm.
    :param graph: the game graph
    :type graph:
    :param max_value: maximal value for the counters
    :type max_value: int
    :return: the fixpoint antichain
    :rtype: Antichain
    """
    # whether we want to print the sets during computation
    toPrint = False

    # get the values to create the antichain of maximal elements of the safe set
    nbr_functions, nbr_counters_per_function = compute_counters_sizes_0(graph)

    start_antichain = Antichain(comparator_generalized_0, intersector_generalized_0)

    # create the antichain of maximal elements of the safe set
    # every counter in every tuple has the maximal value
    for node in graph.get_nodes():
        temp = [node]
        for func in range(0, nbr_functions):
            temp.append(nbr_counters_per_function[func] * [max_value])
        start_antichain.insert(temp)

    if toPrint:
        print("Start antichain : " + str(start_antichain) + "\n")

    antichain1 = start_antichain

    cpre1 = Cpre(start_antichain, 1, graph, nbr_functions, max_value)

    if toPrint:
        print("CPre_1 of start antichain: " + str(cpre1) + "\n")

    cpre0 = Cpre(start_antichain, 0, graph, nbr_functions, max_value)

    if toPrint:
        print("CPre_0 of start antichain: " + str(cpre0) + "\n")

    # we know the elements of cpre0 and cpre1 to be incomparable, so the union
    # of the two antichains can be done through a simple extend
    cpre0.incomparable_elements.extend(cpre1.incomparable_elements)

    if toPrint:
        print("Union of CPre_0 and CPre_1 " + str(cpre0) + "\n")

    antichain2 = antichain1.intersection(cpre0)

    if toPrint:
        print("Inter of start and previous union " + str(antichain2) + "\n")

    nb_iter = 0

    # while we have not obtained the fixpoint
    while not antichain1.compare(antichain2):
        nb_iter += 1

        antichain1 = antichain2

        cpre1 = Cpre(antichain1, 1, graph, nbr_functions, max_value)
        if toPrint:
            print("ITER " + str(nb_iter) + " CPre 1 of prev " + str(cpre1) + "\n")

        cpre0 = Cpre(antichain1, 0, graph, nbr_functions, max_value)

        if toPrint:
            print("ITER " + str(nb_iter) + " CPre 0 of prev " + str(cpre0) + "\n")

        temp = cpre0.union(cpre1)

        if toPrint:
            print("ITER " + str(nb_iter) + " Union of Pre 0 and Pre 1 " + str(temp) + "\n")

        antichain2 = antichain1.intersection(temp)

        if toPrint:
            print("ITER " + str(nb_iter) + " final set " + str(antichain2) + "\n")

    return antichain1
e2ee9cb00ce6f88e03a080d85a635c208cdd5a35
3,653,938
def extract_unii_other_code(tree):
    """Extract the codes for other ingredients"""
    unii_other_xpath = \
        '//generalizedMaterialKind/code[@codeSystem="%s"]/@code' % UNII_OTHER_OID
    return tree.getroot().xpath(unii_other_xpath)
0576dc7537a9212990a72125b8fd406c457efb76
3,653,939
import functools


def initfunc(f):
    """
    Decorator for initialization functions that should
    be run exactly once.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if wrapper.initialized:
            return
        wrapper.initialized = True
        return f(*args, **kwargs)
    wrapper.initialized = False
    return wrapper
337ca902fc1fbe138ad5dd4c203a3cac77e89f57
3,653,940
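A quick usage sketch for `initfunc` above (the function name here is illustrative, not from the dataset): the decorated body runs only on the first call; later calls return None without re-running it.

@initfunc
def init_db():  # hypothetical init function
    print("connecting...")
    return "connection"

init_db()  # runs the body: prints "connecting..." and returns "connection"
init_db()  # short-circuits: returns None, the body does not run again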
import cv2


def edit_frame(frame):
    """Edit a frame into an analyzable frame:
    convert RGB to grayscale, threshold the frame, then invert the colors bitwise.

    Args:
        frame (ndarray): original frame from movie

    Returns:
        work_frame (ndarray): edited frame
    """
    work_frame = frame
    work_frame = cv2.cvtColor(work_frame, cv2.COLOR_RGB2GRAY)
    work_frame = cv2.threshold(work_frame, FRAME_THRESH, 255, cv2.THRESH_BINARY)[1]
    work_frame = cv2.bitwise_not(work_frame)
    return work_frame
b82588fa81093c05e4a683b76aa367ba2be4b2e2
3,653,941
def manchester(bin_string):
    """
    Applies the Manchester encoding technique to a string of bits.
    :param bin_string: the bits to encode
    :type bin_string: str
    :return: the encoded signal
    :rtype: str
    """
    signal_manager = Signal()
    for bin_digit in bin_string:
        if bin_digit == '0':  # Generate +-
            if signal_manager.signal == '+':  # It's positive
                signal_manager.keep()  # +
                signal_manager.flip()  # -
            else:  # It's negative
                signal_manager.flip()  # +
                signal_manager.flip()  # -
        else:  # Generate -+
            if signal_manager.signal == '+':  # It's positive
                signal_manager.flip()  # -
                signal_manager.flip()  # +
            else:  # It's negative
                signal_manager.keep()  # -
                signal_manager.flip()  # +
    return str(signal_manager)
e5f58e929db74f0eeb2a003c25c5097f45c74989
3,653,942
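The `manchester` function above depends on a `Signal` helper that is not part of the row. A minimal stand-in, assuming the signal starts negative and that `str()` yields the emitted transition history (both assumptions, not from the source):

class Signal:  # hypothetical stand-in for the project's Signal class
    def __init__(self):
        self.signal = '-'  # assumed initial polarity
        self.history = ''
    def keep(self):
        self.history += self.signal  # re-emit the current level
    def flip(self):
        self.signal = '+' if self.signal == '-' else '-'
        self.history += self.signal
    def __str__(self):
        return self.history

manchester('01')  # '+--+' under these assumptions: '0' -> '+-', '1' -> '-+'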
from typing import List


def load_admin_cells(identifier: str) -> List[MultiPolygon]:
    """Loads the administrative region cells

    Data is loaded from :py:const:`ADMIN_GEOJSON_TEMPLATE` ``% identifier``.

    This is a wrapper function for :py:func:`load_polygons_from_json`.

    Returns:
        A list of the administrative region cells.
    """
    return load_polygons_from_json(ADMIN_GEOJSON_TEMPLATE % identifier)
dc2083ca7392da5b2d6509b6dd8f108bd8218726
3,653,943
import inspect


def register_writer(format, cls=None):
    """Return a decorator for a writer function.

    A decorator factory for writer functions.

    A writer function should have at least the following signature:
    ``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
    an open filehandle. This decorator provides the ability to use filepaths in
    the same argument position as `fh`. They will automatically be opened and
    closed.

    **The writer must not close the filehandle**, cleanup will be
    handled external to the reader and is not its concern.

    Any additional `**kwargs` will be passed to the writer and may be used if
    necessary.

    The writer must not return a value. Instead it should only mutate the `fh`
    in a way consistent with its purpose.

    If the writer accepts a generator, it should exhaust the generator to
    ensure that the potentially open filehandle backing said generator is
    closed.

    .. note:: Failure to adhere to the above interface specified for a writer
       will result in unintended side-effects.

    Parameters
    ----------
    format : str
        A format name which a decorated writer will be bound to.
    cls : type, optional
        The class which a decorated writer will be bound to. If `cls` is None
        the writer will be bound as expecting a generator.
        Default is None.

    Returns
    -------
    function
        A decorator to be used on a writer. The decorator will raise a
        ``skbio.io.DuplicateRegistrationError`` if there already exists a
        *writer* bound to the same permutation of `fmt` and `cls`.

    See Also
    --------
    skbio.io.write
    skbio.io.get_writer

    """
    def decorator(writer):
        format_class = _formats.setdefault(format, {}).setdefault(cls, {})

        if 'writer' in format_class:
            raise DuplicateRegistrationError('writer', format, cls)

        file_args = []
        writer_spec = inspect.getargspec(writer)
        if writer_spec.defaults is not None:
            # Concept from http://stackoverflow.com/a/12627202/579416
            for key, default in zip(
                    writer_spec.args[-len(writer_spec.defaults):],
                    writer_spec.defaults):
                if default is FileSentinel:
                    file_args.append(key)

        # We wrap the writer so that basic file handling can be managed
        # externally from the business logic.
        def wrapped_writer(obj, fp, mode='w', **kwargs):
            file_keys = []
            files = [fp]
            for file_arg in file_args:
                if file_arg in kwargs:
                    if kwargs[file_arg] is not None:
                        file_keys.append(file_arg)
                        files.append(kwargs[file_arg])
                else:
                    kwargs[file_arg] = None

            with open_files(files, mode) as fhs:
                for key, fh in zip(file_keys, fhs[1:]):
                    kwargs[key] = fh
                writer(obj, fhs[0], **kwargs)

        wrapped_writer.__doc__ = writer.__doc__
        wrapped_writer.__name__ = writer.__name__

        format_class['writer'] = wrapped_writer
        return wrapped_writer
    return decorator
b8312d987cecfa106c73afb4eca5299637d260f6
3,653,944
import torch


def accuracy(output, target, topk=1, axis=1, ignore_index=-100, exclude_mask=False):
    """Computes the precision@k for the specified values of k
    prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
    """
    # torch tensors have no .copy() method; clone().detach() is the
    # equivalent way to get an independent copy
    input_tensor = output.clone().detach()
    target_tensor = target.clone().detach()
    num_classes = int_shape(output)[axis]
    if len(input_tensor) == 0:
        return to_tensor(0.0)

    is_logsoftmax = None
    from_logits = None
    output_exp = exp(input_tensor)

    if (ndim(input_tensor) >= 1 and 'float' in str(input_tensor.dtype)
            and input_tensor.min() >= 0 and input_tensor.max() <= 1):
        is_logsoftmax = False
        from_logits = True
        input_tensor = clip(input_tensor, min=1e-8, max=1 - 1e-8)
    elif (ndim(output_exp) >= 1 and 'float' in str(output_exp.dtype)
            and output_exp.min() >= 0 and output_exp.max() <= 1):
        is_logsoftmax = True
        from_logits = True
        input_tensor = clip(output_exp, min=1e-8, max=1 - 1e-8)
    else:
        is_logsoftmax = False
        from_logits = False

    if input_tensor.dtype != torch.int64 and topk == 1:
        if len(input_tensor.size()) == 1:  # binary
            input_tensor = input_tensor.gt(0.5).float()
        else:
            input_tensor = argmax(input_tensor, axis).squeeze()
    if target_tensor.dtype != torch.int64:
        target_tensor = argmax(target_tensor, axis).squeeze()
    if input_tensor.shape != target_tensor.shape and topk == 1:
        raise ValueError('input shape {0} is not compatible with target shape {1}'
                         .format(input_tensor.shape, target_tensor.shape))

    input_mask = ones_like(input_tensor)
    if isinstance(ignore_index, int) and 0 <= ignore_index < num_classes:
        input_mask[input_tensor == ignore_index] = 0
    elif isinstance(ignore_index, (list, tuple)):
        for idx in ignore_index:
            if isinstance(idx, int) and 0 <= idx < int_shape(output)[axis]:
                input_mask[input_tensor == idx] = 0

    batch_size = target_tensor.size(0)
    if topk == 1:
        return (input_tensor.eq(target_tensor).float() * input_mask).sum() \
            / clip(input_mask.float().sum(), min=1)
    else:
        _, pred = input_tensor.topk(topk)
        pred = pred.t()
        correct = pred.eq(target_tensor.reshape((1, -1)).expand_as(pred))
        correct_k = reduce_sum(correct[:topk].reshape(-1).float(), axis=0, keepdims=True)
        return correct_k.mul_(1 / batch_size)
a35d4bff308862c4c8a83948c619e66299d7887f
3,653,946
def jitter_over_thresh(x: xr.DataArray, thresh: str, upper_bnd: str) -> xr.DataArray:
    """Replace values greater than threshold by a uniform random noise.

    Do not confuse with R's jitter, which adds uniform noise instead of
    replacing values.

    Parameters
    ----------
    x : xr.DataArray
      Values.
    thresh : str
      Threshold over which to add uniform random noise to values, a quantity with units.
    upper_bnd : str
      Maximum possible value for the random noise, a quantity with units.

    Returns
    -------
    xr.DataArray

    Notes
    -----
    If thresh is low, this will change the mean value of x.
    """
    return jitter(x, lower=None, upper=thresh, minimum=None, maximum=upper_bnd)
1a508c30aa68c3b8808f3fe0b254ad98621cd245
3,653,947
from .rename_axis import rename_axis_with_level


def index_set_names(index, names, level=None, inplace=False):
    """
    Set Index or MultiIndex name.
    Able to set new names partially and by level.

    Parameters
    ----------
    names : label or list of label
        Name(s) to set.
    level : int, label or list of int or label, optional
        If the index is a MultiIndex, level(s) to set (None for all
        levels). Otherwise level must be None.
    inplace : bool, default False
        Modifies the object directly, instead of creating a new Index or
        MultiIndex.

    Returns
    -------
    Index
        The same type as the caller or None if inplace is True.

    See Also
    --------
    Index.rename : Able to set new names without level.

    Examples
    --------
    >>> import mars.dataframe as md
    >>> idx = md.Index([1, 2, 3, 4])
    >>> idx.execute()
    Int64Index([1, 2, 3, 4], dtype='int64')
    >>> idx.set_names('quarter').execute()
    Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')

    >>> idx = md.MultiIndex.from_product([['python', 'cobra'],
    ...                                   [2018, 2019]])
    >>> idx.execute()
    MultiIndex([('python', 2018),
                ('python', 2019),
                ( 'cobra', 2018),
                ( 'cobra', 2019)],
               )
    >>> idx.set_names(['kind', 'year'], inplace=True)
    >>> idx.execute()
    MultiIndex([('python', 2018),
                ('python', 2019),
                ( 'cobra', 2018),
                ( 'cobra', 2019)],
               names=['kind', 'year'])
    >>> idx.set_names('species', level=0).execute()
    MultiIndex([('python', 2018),
                ('python', 2019),
                ( 'cobra', 2018),
                ( 'cobra', 2019)],
               names=['species', 'year'])
    """
    op = DataFrameRename(index_mapper=names, level=level,
                         output_types=get_output_types(index))
    ret = op(index)

    if inplace:
        df_or_series = getattr(index, '_get_df_or_series', lambda: None)()
        if df_or_series is not None:
            rename_axis_with_level(df_or_series, names, axis=index._axis,
                                   level=level, inplace=True)
            index.data = df_or_series.axes[index._axis].data
        else:
            index.data = ret.data
    else:
        return ret
4ad24ea4c1fd42b1259d43e273c44c4295a9e329
3,653,948
def get_critic(obs_dim: int) -> tf.keras.Model:
    """Get a critic that returns the expected value for the current state"""
    observation = tf.keras.Input(shape=(obs_dim,), name='observation')
    x = layers.Dense(64, activation='tanh')(observation)
    x = layers.Dense(64, activation='tanh')(x)
    value = layers.Dense(1, name='value')(x)

    critic = tf.keras.Model(observation, value)
    # critic.summary()
    return critic
69082d6260666c733e32093fbc7180726f77acc6
3,653,949
def register_and_login_test_user(c):
    """
    Helper function that makes an HTTP request to register a test user

    Parameters
    ----------
    c : object
        Test client object

    Returns
    -------
    str
        Access JWT in order to use in subsequent tests
    """
    c.post(
        "/api/auth/register",
        json={
            "username": "test",
            "password": "secret",
            "first_name": "tim",
            "last_name": "apple",
            "email": "[email protected]",
            "birthday": "1990-01-01",
        },
    )

    setup_resp = c.post(
        "/api/auth/login", json={"username": "test", "password": "secret"}
    )
    setup_resp_json = setup_resp.get_json()
    setup_access_token = setup_resp_json["access_token"]

    return setup_access_token
b76f7f6afa9af453246ae304b1b0504bd68b8919
3,653,950
def get_ssh_challenge_token(account, appid, ip=None, vo='def'):
    """
    Get a challenge token for subsequent SSH public key authentication.

    The challenge token lifetime is 5 seconds.

    :param account: Account identifier as a string.
    :param appid: The application identifier as a string.
    :param ip: IP address of the client as a string.
    :param vo: The VO to act on.
    :returns: A dict with token and expires_at entries.
    """
    kwargs = {'account': account}
    if not permission.has_permission(issuer=account, vo=vo,
                                     action='get_ssh_challenge_token', kwargs=kwargs):
        raise exception.AccessDenied('User can not get challenge token for account %s' % account)

    account = InternalAccount(account, vo=vo)

    return authentication.get_ssh_challenge_token(account, appid, ip)
12da26b4a20e648ca9fc6d325647f42288324b83
3,653,951
def rootpath_capacity_exceeded(rootpath, newSize):
    """
    Return True if rootpath is already allocated to the extent
    that it cannot accommodate newSize, otherwise return False.
    """
    vols_in_rootpath = Volume.objects.filter(root_path=rootpath)
    rootpathallocsum = 0
    if vols_in_rootpath.count() > 0:
        rootpathallocsum = vols_in_rootpath.aggregate(
            alSize=db.models.Sum('size_GB'))['alSize']
    if rootpathallocsum + newSize > rootpath.capacity_GB:
        return True
    return False
3b8d90f3693ce12de93c967d20c6a7b2ccb7ec38
3,653,952
import requests
import json


def user_token(user: str) -> str:
    """
    Authorize this request with the GitHub app set by
    the 'app_id' and 'private_key' environment variables.
    1. Get the installation ID for the user that has installed the app
    2. Request a new token for that user
    3. Return it so it can be used in future API requests
    """
    # Hardcode the installation to PyTorch so we can always get a valid ID key
    # (renamed from `id` to avoid shadowing the builtin)
    inst_id = installation_id("pytorch")
    url = f"https://api.github.com/app/installations/{inst_id}/access_tokens"
    r_bytes = requests.post(url, headers=app_headers())
    r = json.loads(r_bytes.content.decode())
    token = str(r["token"])
    return token
c02fae92505a922f58f231682a009d24ed6432bc
3,653,953
def file_exists(path):
    """
    Return True if the file from the path exists.

    :param path: A string containing the path to a file.
    :return: a boolean - True if the file exists, otherwise False
    """
    return isfile(path)
79610224c3e83f6ba4fdeb98b1faaf932c249ff2
3,653,954
from typing import Callable
from typing import Dict


def _static_to_href(pathto: Callable, favicon: Dict[str, str]) -> Dict[str, str]:
    """If a ``static-file`` is provided, returns a modified version of the icon
    attributes replacing ``static-file`` with the correct ``href``.

    If both ``static-file`` and ``href`` are provided, ``href`` will be ignored.
    """
    if FILE_FIELD in favicon:
        attrs = favicon.copy()
        attrs["href"] = pathto(
            f"{OUTPUT_STATIC_DIR}/{attrs.pop(FILE_FIELD)}", resource=True
        )
        return attrs
    return favicon
f54e5ced825dc44bfa14a09622c7fa9b179660c5
3,653,955
import torch


def concatenate(tensor1, tensor2, axis=0):
    """
    Basically a wrapper for torch.cat, with the exception that tensor2 is
    returned by itself if tensor1 is None or evaluates to False.

    :param tensor1: input tensor or None
    :type tensor1: mixed
    :param tensor2: input tensor
    :type tensor2: torch.Tensor
    :param axis: axis to concatenate along
    :type axis: int
    :return: concatenated tensor
    :rtype: torch.Tensor
    """
    assert isinstance(tensor2, (torch.Tensor, torch.autograd.Variable))
    if tensor1 is not None:
        assert isinstance(tensor1, (torch.Tensor, torch.autograd.Variable))
        return torch.cat((tensor1, tensor2), dim=axis)
    else:
        return tensor2
24791a201f1ddb64cd2d3f683ecc38471d21697b
3,653,956
def setKey(key, keytype):
    """
    If keytype is valid, save a copy of the key accordingly
    and check whether the key is valid.
    """
    global _key, _keytype, FREE_API_KEY, PREMIUM_API_KEY
    keytype = keytype.lower()
    if keytype in ("f", "fr", "free"):
        keytype = "free"
        FREE_API_KEY = key
    elif keytype.startswith("prem") or keytype in ("nonfree", "non-free"):
        keytype = "premium"
        PREMIUM_API_KEY = key
    else:
        print("invalid keytype", keytype)
        return
    oldkey = _key
    oldkeytype = _keytype
    _key = key
    _keytype = keytype
    w = LocalWeather("london")
    # w.data is not False rather than w.data to suppress the Python 2.7
    # FutureWarning: "The behavior of this method will change in future
    # versions...."
    if w is not None and hasattr(w, 'data') and w.data is not False:
        return True
    else:
        print("The key is not valid.")
        _key = oldkey
        _keytype = oldkeytype
        return False
8baab2972ea5c9fbe33845aaed7c1ab4cc631a2e
3,653,957
import functools
import operator


def sum_(obj):
    """Sum the values in the given iterable.

    Different from the built-in summation function, the summation is based on
    the first item in the iterable, and a SymPy integer zero is returned when
    the iterable is empty.
    """
    i = iter(obj)
    try:
        init = next(i)
    except StopIteration:
        return Integer(0)
    else:
        return functools.reduce(operator.add, i, init)
70727443ba5a62e5bd91e3c0a60130f6cc0b65e5
3,653,958
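Because `sum_` above seeds the reduction with the first item rather than the integer 0, it also works for non-numeric types that support `+`; the SymPy `Integer(0)` only appears for empty input. A small illustration:

sum_([1, 2, 3])      # 6
sum_(['ab', 'cd'])   # 'abcd' -- string concatenation, no integer start value
sum_([])             # Integer(0), the SymPy zero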
def setup_s3_client(job_data):
    """Creates an S3 client

    Uses the credentials passed in the event by CodePipeline. These
    credentials can be used to access the artifact bucket.

    :param job_data: The job data structure
    :return: An S3 client with the appropriate credentials
    """
    try:
        key_id = job_data['artifactCredentials']['accessKeyId']
        key_secret = job_data['artifactCredentials']['secretAccessKey']
        session_token = job_data['artifactCredentials']['sessionToken']

        session = Session(aws_access_key_id=key_id,
                          aws_secret_access_key=key_secret,
                          aws_session_token=session_token)
    except Exception as e:
        # logger.warn is deprecated; logger.warning is the supported spelling
        logger.warning('No credentials in artifact - using default role access: {}'.format(e))
        session = Session()

    return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
bb51e03de125eeb6ff5e1e6d16b50ba07fdc7c56
3,653,959
import numpy as np


def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """ FROM KERAS
    Pads each sequence to the same length:
    the length of the longest sequence.
    If maxlen is provided, any sequence longer
    than maxlen is truncated to maxlen.
    Truncation happens off either the beginning (default) or
    the end of the sequence.
    Supports post-padding and pre-padding (default).

    # Arguments
        sequences: list of lists where each element is a sequence
        maxlen: int, maximum length
        dtype: type to cast the resulting sequence.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than
            maxlen either in the beginning or in the end of the sequence
        value: float, value to pad the sequences to the desired value.

    # Returns
        x: numpy array with dimensions (number_of_sequences, maxlen)
    """
    lengths = [len(s) for s in sequences]

    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)

    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break

    x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" not understood' % truncating)

        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
5d4603c5a71f898dc4f501d2424707ea10adbd0e
3,653,961
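A short usage sketch for `pad_sequences` above:

seqs = [[1, 2], [3, 4, 5, 6], [7]]
x = pad_sequences(seqs)   # pads to the longest length, 4
x.shape                   # (3, 4)
x[0]                      # array([0, 0, 1, 2], dtype=int32) -- pre-padded
pad_sequences(seqs, maxlen=2, truncating='post')[1]  # array([3, 4], dtype=int32)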
def basic_pyxll_function_22(x, y, z):
    """if z return x, else return y"""
    if z:
        # we're returning an integer, but the signature
        # says we're returning a float.
        # PyXLL will convert the integer to a float for us.
        return x
    return y
851b5eef683b0456a0f5bce7f3850698693b067e
3,653,962
def job_hotelling(prob_label, tr, te, r, ni, n):
    """Hotelling T-squared test"""
    with util.ContextTimer() as t:
        htest = tst.HotellingT2Test(alpha=alpha)
        test_result = htest.perform_test(te)

    return {'test_method': htest, 'test_result': test_result, 'time_secs': t.secs}
137a3b4426a01675fe6995408ad71eec34126341
3,653,963
import numpy as np


def return_circle_aperature(field, mask_r):
    """Filter the circle aperture of a light field.

    Parameters
    ----------
    field : Field
        Input square field.
    mask_r : float, from 0 to 1
        Radius of a circle mask.

    Returns
    ----------
    X : array-like
        Filtered meshgrid X.
    Y : array-like
        Filtered meshgrid Y.
    norm_radius : array-like
        Normalized radius meshgrid.
    """
    length = field.shape[0]
    norm_length = np.linspace(-1, 1, length)
    X, Y = np.meshgrid(norm_length, norm_length)
    norm_radius = np.sqrt(X**2 + Y**2)
    X[norm_radius > mask_r] = np.nan
    Y[norm_radius > mask_r] = np.nan

    return X, Y, norm_radius
49ae52bfa639cf8a7b8592caa1dbf8dbdef115f8
3,653,964
import json


def user_get():
    """ Get information from the database about a user, given his id.
    If there are field names received in the body, only those will be queried.
    If no field is provided, every field will be selected.

    The body should be a JSON object following the schema:
    {
        "user_id": id,
        "fields": ["field1", ...]
    }

    Returns:
        Response: - 200 in case of success and the user info in the body.
                  - 400 if the body does not have all the necessary information
                    or the field names are wrong.
                  - 404 if the user is not found.
    """
    body_schema = {
        "type": "object",
        "properties": {
            "user_id": {"type": "number"},
            "fields": {
                "type": "array",
                "minItems": 1,
                "items": {
                    "type": "string",
                }
            }
        },
        "required": ["user_id"]
    }
    payload = request.get_json(silent=True)
    is_valid = validate_json(payload, body_schema)
    if not is_valid:
        return Response(status=400)

    user_id = payload["user_id"]
    if "fields" in payload:
        fields = payload["fields"]
        query = sql.SQL("SELECT {} FROM users WHERE user_id={};").format(
            sql.SQL(", ").join(map(sql.Identifier, fields)),
            sql.Literal(user_id)
        )
    else:
        query = sql.SQL("SELECT * FROM users WHERE user_id={};").format(
            sql.Literal(payload["user_id"])
        )

    cursor = CONN.cursor(cursor_factory=RealDictCursor)
    try:
        cursor.execute(query)
        results = cursor.fetchall()
    except psycopg2.errors.UndefinedColumn:
        CONN.rollback()
        return Response(status=400)
    finally:
        cursor.close()
    CONN.commit()

    if len(results) == 0:
        return Response(status=404)

    return Response(
        status=200,
        response=json.dumps(results),
        mimetype="application/json"
    )
02b1a1bc1bedc2098bd907098a40fad41c2391d7
3,653,965
def get_data_file_args(args, language):
    """
    For an interface, return the language-specific set of data file arguments

    Args:
        args (dict): Dictionary of data file arguments for an interface
        language (str): Language of the testbench

    Returns:
        dict: Language-specific data file arguments
    """
    if language in args:
        return args[language]
    return args["generic"]
11e30b92316bad9a46b87bd9188f97d5e8860377
3,653,966
def branch_exists(branch):
    """Return True if the branch exists."""
    try:
        run_git("rev-parse --verify {}".format(branch), quiet=True)
        return True
    except ProcessExecutionError:
        return False
8f08eeb78b322220def2f883e8172ac94df97063
3,653,967
import numpy
from numpy import fft
from scipy.signal import butter, filtfilt, welch


def spectrum_like_noise(signal: numpy.ndarray,
                        *,
                        sampling_rate=40000,
                        keep_signal_amp_envelope=False,
                        low_pass_cutoff=50,  # Hz
                        low_pass_order=6,
                        seed: int = 42,
                        window_length_sec: float = 20 / 1000,  # 20 ms
                        p_overlap: float = .5,
                        long_term_avg: bool = True
                        ) -> numpy.ndarray:
    """Create a noise with same spectrum as the input signal.

    Randomises phase.

    Parameters
    ----------
    signal : array_like
        Input signal.
    sampling_rate : int
        Sampling frequency of the input signal. (Default value = 40000)
    keep_signal_amp_envelope : bool
        Apply the envelope of the original signal to the noise.
        (Default value = False)
    low_pass_cutoff : float
    low_pass_order : int
    seed : int
    long_term_avg : bool
    window_length_sec : float
    p_overlap : float

    Returns
    -------
    ndarray
        Noise signal.
    """
    assert window_length_sec > 0
    assert 0 <= p_overlap <= 1
    signal = zero_pad_to_power_2(signal)  # Ensure welch works with any window size
    signal_length = signal.shape[-1]
    # scaling factor defined as sum of squared samples of window function
    window_sum_squares = signal_length
    # Scaling coefficient 2 takes into account removal of energy at negative
    # frequencies (we drop this side of the PSD)
    sc = 2 / (sampling_rate * window_sum_squares)

    if not long_term_avg:
        n_fft = next_pow_2(signal_length)
        spec = numpy.abs(fft.rfft(signal, n_fft))
        psd = (spec ** 2) * sc
    else:
        n_per_seg = next_pow_2(int(sampling_rate * window_length_sec))  # next_pow_2 per seg == n_fft
        n_overlap = int(n_per_seg * p_overlap)
        f, psd = welch(signal,
                       sampling_rate,
                       nperseg=n_per_seg,
                       noverlap=n_overlap,
                       scaling='density',
                       return_onesided=True,
                       detrend=False,
                       # window='boxcar',
                       window='hann',  # spelled 'hanning' in older scipy versions
                       )
        n_fft = n_per_seg
        psd /= (signal_length / n_per_seg)  # normalise?
        spec = numpy.sqrt(psd / sc)

    # Give each spectral component a random phase, PHI(f(k)) = random number,
    # uniformly distributed between 0 and 360 degrees (or equivalently,
    # between 0 and 2Pi radians)
    noise = []
    runs = signal_length // n_fft
    for i in range(runs + 1):
        numpy_seed(seed + i)
        noise.extend(numpy.real(
            fft.irfft(
                spec * numpy.exp(2 * numpy.pi * 1j * numpy.random.random(spec.shape[-1])),
                # Randomise phase. 0->360, 2 pi rads
                n_fft)))

    noise = numpy.array(noise)[:signal_length]
    if keep_signal_amp_envelope:
        [bb, aa] = butter(low_pass_order, low_pass_cutoff / (sampling_rate / 2))  # Cutoff Hz, LP filter
        noise *= filtfilt(bb,  # numerator
                          aa,  # denominator
                          hilbert_envelope(signal)  # envelope of speech signal in time domain
                          )
    return numpy.expand_dims(noise, 0)
6e6ced3a5220a9a1d66c83a33a9232d265d18a1a
3,653,968
import re


def check_string_capitalised(string):
    """ Check to see if a string is in all CAPITAL letters. Boolean. """
    return bool(re.match('^[A-Z_]+$', string))
f496d79fafae4c89c3686856b42113c4818f7ed8
3,653,969
import numpy as np
import torch


def sample_zero_entries(edge_index, seed, num_nodes, sample_mult=1.0):
    """Obtain zero entries from a sparse matrix.

    Args:
        edge_index (tensor): (2, N), N is the number of edges.
        seed (int): to control randomness
        num_nodes (int): number of nodes in the graph
        sample_mult (float): the number of edges sampled is
            N * sample_mult.

    Returns:
        torch.tensor, (2, N) containing zero entries
    """
    n_edges = edge_index.shape[1]
    np.random.seed(seed)

    # Number of edges in both directions must be even
    n_samples = int(np.ceil(sample_mult * n_edges / 2) * 2)
    adjacency = adj_from_edge_index(edge_index, num_nodes)
    zero_entries = np.zeros([2, n_samples], dtype=np.int32)
    nonzero_or_sampled = set(zip(*adjacency.nonzero()))

    i = 0
    while True:
        t = tuple(np.random.randint(0, adjacency.shape[0], 2))
        # Don't sample the diagonal of the adjacency matrix
        if t[0] == t[1]:
            continue
        if t not in nonzero_or_sampled:
            # Add edge in both directions
            t_rev = (t[1], t[0])
            zero_entries[:, i] = t
            zero_entries[:, i + 1] = t_rev
            i += 2
            if i == n_samples:
                break

            nonzero_or_sampled.add(t)
            nonzero_or_sampled.add(t_rev)

    return torch.tensor(zero_entries, dtype=torch.long)
211e97fa0a2622d49c50673c0b6255954383f3a0
3,653,970
import textwrap


def ped_file_parent_missing(fake_fs):
    """Return fake file system with PED file"""
    content = textwrap.dedent(
        """
        # comment
        FAM II-1\tI-1\t0\t1\t2
        FAM I-1 0\t0\t1\t1
        """
    ).strip()
    fake_fs.fs.create_file("/test.ped", create_missing_dirs=True, contents=content)
    return fake_fs
9df19ab925984236aa581c9b8843591f05d3b7b4
3,653,971
import random

import requests


def GettingAyah():
    """Get an Ayah from the Quran at a fixed time interval."""
    while True:
        ayah = random.randint(1, 6237)
        url = f'http://api.alquran.cloud/v1/ayah/{ayah}'
        res = requests.get(url)
        if len(res.json()['data']['text']) <= 280:
            return res.json()['data']['text']
5739cbd3554b97f01eefef7f59a4087e5497e3e7
3,653,972
def iterdecode(value):
    """
    Decode enumerable from string presentation as a tuple
    """
    if not value:
        return tuple()
    result = []
    accumulator = u''
    escaped = False
    for c in value:
        if not escaped:
            if c == CHAR_ESCAPE:
                escaped = True
                continue
            elif c == CHAR_SEPARATOR:
                result.append(accumulator)
                accumulator = u''
                continue
        else:
            escaped = False
        accumulator += c
    result.append(accumulator)
    return tuple(result)
d8b03338a4578ee7b37a4f6d31d23463fc0a9b84
3,653,973
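`iterdecode` above references `CHAR_ESCAPE` and `CHAR_SEPARATOR` constants that are not part of the row; assuming illustrative values of '\\' and ',' (hypothetical, not from the source), it behaves like a separator-based splitter with escaping:

CHAR_ESCAPE = '\\'     # assumed value
CHAR_SEPARATOR = ','   # assumed value

iterdecode(u'a,b\\,c')  # (u'a', u'b,c') -- the escaped comma stays literal
iterdecode(u'')         # ()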
import cv2


def run_resolution_filter(image=None, image_path=None, height=600, width=1000):
    """
    This will take the image, which is the correctly rotated YOLO output.
    Initially we are doing this for driving licenses only. Will return 1 if
    the image height and width are greater than the desired height and width
    (600 and 1000 pixels by default), else it will return 10002.
    :return:
    """
    result = False
    if image is not None:
        result = test_image_height_and_width(
            image, desired_width=width, desired_height=height)
    if image_path is not None and image is None:
        img = cv2.imread(image_path)
        result = test_image_height_and_width(
            img, desired_width=width, desired_height=height)
    if result:
        return 1
    return 10002
fde3040dbb29d6c5f7d79237df51f425d2d043b4
3,653,974
# The original row imported the stdlib `types` module, which has no Document
# class; the Google Cloud Natural Language client modules are needed here.
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types


def text_to_emotion(text):
    """
    Infer the emotion from the given text and return it.

    Parameters
    ----------
    text : string
        The input text.

    Returns
    -------
    {'magnitude', 'score'}
    """
    client = language.LanguageServiceClient()
    document = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT,
        language="ja"
    )
    sentiment = client.analyze_sentiment(document=document).document_sentiment

    return {'magnitude': sentiment.magnitude, 'score': sentiment.score}
b3b784fa777146f7f1a361784f848b28651676a4
3,653,975
def process_actions(list_response_json, headers, url, force_reset):
    """
    If a policy does not exist on a given cluster, find the right values
    defined in qos_dict and apply them
    """
    qos_dict = {}
    # This dictionary sets the tiers and min/max/burst settings
    qos_dict['tiers'] = {"bronze": [500, 5000, 10000],
                         "silver": [2000, 20000, 50000],
                         "gold": [5000, 100000, 150000]}

    # Check to see if there are no policies set
    force_reset_dict = {}
    if len(list_response_json['result']['qosPolicies']) == 0:
        print(f"No existing QoS Policies found, implementing full install")
        for qos_key, qos_val in qos_dict['tiers'].items():
            pol_name = qos_key
            min_iops = qos_val[0]
            max_iops = qos_val[1]
            burst_iops = qos_val[2]
            payload = build_payload(pol_name, min_iops, max_iops, burst_iops)
            connect_cluster(headers, url, payload)

    # If there are policies, ignore them if they match names, remove that
    # name from the dict and move on
    else:
        for policy in list_response_json['result']['qosPolicies']:
            pol_name = policy['name']
            if pol_name in qos_dict['tiers'].keys():
                pol_id = policy['qosPolicyID']
                min_iops = qos_dict['tiers'][pol_name][0]
                max_iops = qos_dict['tiers'][pol_name][1]
                burst_iops = qos_dict['tiers'][pol_name][2]
                pol_min = policy['qos']['minIOPS']
                pol_max = policy['qos']['maxIOPS']
                pol_burst = policy['qos']['burstIOPS']
                if ((min_iops != pol_min or max_iops != pol_max or
                     burst_iops != pol_burst) and force_reset is True):
                    print(f"Policy mismatch detected on {pol_name}... resetting "
                          f"as reset flag is set to True")
                    print(qos_dict['tiers'][pol_name])
                    modify_qos_policy(headers, url, pol_id,
                                      min_iops, max_iops, burst_iops)
                elif ((min_iops != pol_min or max_iops != pol_max or
                       burst_iops != pol_burst) and force_reset is False):
                    print(f"Policy mismatch detected on {pol_name}... Leaving "
                          f"as reset flag is set to false")
                else:
                    print(f"QoS Policy {pol_name} found, policy is not in "
                          f"configuration dictionary. Ignoring")
                    pass
            if policy['name'] in qos_dict['tiers'].keys():
                qos_dict['tiers'].pop(pol_name)
    return qos_dict
1c3651518c8c0f0f174876afdd0961099b3af342
3,653,976
def validate_ac_power(observation, values):
    """
    Run a number of validation checks on a daily timeseries of AC power.

    Parameters
    ----------
    observation : solarforecastarbiter.datamodel.Observation
       Observation object that the data is associated with
    values : pandas.Series
       Series of observation values

    Returns
    -------
    timestamp_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_timestamp_spacing`
    night_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_day_night` or
        :py:func:`.validator.check_day_night_interval`
    limit_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_ac_power_limits`
    """
    solar_position, dni_extra, timestamp_flag, night_flag = _solpos_dni_extra(
        observation, values)
    day_night = \
        ~quality_mapping.convert_mask_into_dataframe(night_flag)['NIGHTTIME']
    limit_flag = validator.check_ac_power_limits(
        values, day_night,
        observation.site.modeling_parameters.ac_capacity, _return_mask=True)
    return timestamp_flag, night_flag, limit_flag
fcc487f61276e319316df3f99559ef935a7f0e7b
3,653,977
import itertools

import six


def partition(predicate, iterable):
    """Use `predicate` to partition entries into falsy and truthy ones.

    Recipe taken from the official documentation.
    https://docs.python.org/3/library/itertools.html#itertools-recipes
    """
    t1, t2 = itertools.tee(iterable)
    return (
        six.moves.filterfalse(predicate, t1),
        six.moves.filter(predicate, t2),
    )
5777203d9d34a9ffddc565129d8dda3ec91efc8e
3,653,978
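A quick illustration of `partition` above: the first iterator yields the falsy entries, the second the truthy ones.

evens, odds = partition(lambda n: n % 2, range(10))
list(evens)  # [0, 2, 4, 6, 8] -- predicate is falsy
list(odds)   # [1, 3, 5, 7, 9] -- predicate is truthy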
def get_node(obj, path):
    """Retrieve a deep object based on a path. Return either a Wrapped
    instance if the deep object is not a node, or another type of object."""
    subobj = obj
    indices = []
    for item in path:
        try:
            subobj = subobj[item]
        except Exception:
            indices.append(item)
    subobj, indices = _select(subobj, indices)
    if isinstance(subobj, dict) or (isinstance(subobj, list) and
                                    subobj and isinstance(subobj[0], dict)):
        return Wrapped(obj, path)
    else:
        assert not indices, "This path does not exist."
        return subobj
f48cb0dc5ae149d0758348725ebc521e7838230f
3,653,980
def without_bond_orders(gra):
    """ resonance graph with maximum spin (i.e. no pi bonds)
    """
    bnd_keys = list(bond_keys(gra))
    # don't set dummy bonds to one!
    bnd_ord_dct = bond_orders(gra)
    bnd_vals = [1 if v != 0 else 0
                for v in map(bnd_ord_dct.__getitem__, bnd_keys)]
    bnd_ord_dct = dict(zip(bnd_keys, bnd_vals))
    return set_bond_orders(gra, bnd_ord_dct)
88b785c802a1d74a12f64a1eab6403429fa00cad
3,653,981
def check_struc(d1, d2, errors=None, level='wf'):
    """Recursively check the structure of dictionary 2 against that of dict 1

    Arguments
    ---------
    d1 : dict
        Dictionary with desired structure
    d2 : dict
        Dictionary with structure to check
    errors : list of str, optional
        Missing values in d2. Initial value is [].
    level : str, optional
        Level of search. Initial value is 'wf' (wind farm) for top-level
        dictionary.

    Returns
    -------
    errors : list of str
        Missing values in d2.
    """
    if errors is None:  # avoid the mutable-default-argument pitfall
        errors = []
    for k1, v1 in d1.items():  # loop through keys and values in first dict
        if k1 not in d2.keys():  # if key doesn't exist in d2
            errors.append('{} not in dictionary'.format('.'.join([level, k1])))
        elif isinstance(v1, dict):  # otherwise, if item is a dict, recurse
            errors = check_struc(v1, d2[k1],
                                 errors=errors,  # pass in accumulated errors
                                 level='.'.join([level, k1]))  # change level
    return errors
aa835e7bbd6274e73d0b3d45d1ec4d617af0a167
3,653,982
def indexoflines(LFtop):
    """Determine the selected line indices of Gromacs-compatible topology files"""
    file1 = open(LFtop, "r")
    readline = file1.readlines()
    lineindex = ["x", "x", "x"]
    n = 0
    for line in readline:
        linelist = line.split()
        if "atomtypes" in linelist:
            lineindex[0] = n
        elif "moleculetype" in linelist:
            lineindex[1] = n
        elif "system" in linelist:
            lineindex[2] = n
        n += 1
    file1.close()

    # any section that was not found gets an index past the end of the file
    for idx in range(len(lineindex)):
        if not str(lineindex[idx]).isnumeric():
            lineindex[idx] = n + 1

    return {'atomtypes': lineindex[0],
            'moleculetype': lineindex[1],
            'system': lineindex[2]}
dd2653c6245d9f7a0fa8647dcc841c51e01f9b2d
3,653,983
def create_mock_github(user='octo-cat', private=False):
    """Factory for mock GitHub objects.

    Example: ::

        >>> github = create_mock_github(user='octocat')
        >>> github.branches(user='octocat', repo='hello-world')
        >>> [{u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
        ...    u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'},
        ...   u'name': u'dev'},
        ...  {u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
        ...    u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'},
        ...   u'name': u'master'},
        ...  {u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
        ...    u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'},
        ...   u'name': u'no-bundle'}]

    :param str user: Github username.
    :param bool private: Whether repo is private.
    :return: An autospecced GitHub Mock object
    """
    github_mock = mock.create_autospec(GitHub)
    github_mock.repo.return_value = github3.repos.Repository.from_json({
        u'archive_url': u'https://api.github.com/repos/{user}/mock-repo/{{archive_format}}{{/ref}}'.format(user=user),
        u'assignees_url': u'https://api.github.com/repos/{user}/mock-repo/assignees{{/user}}'.format(user=user),
        u'blobs_url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs{{/sha}}'.format(user=user),
        u'branches_url': u'https://api.github.com/repos/{user}/mock-repo/branches{{/branch}}'.format(user=user),
        u'clone_url': u'https://github.com/{user}/mock-repo.git'.format(user=user),
        u'collaborators_url': u'https://api.github.com/repos/{user}/mock-repo/collaborators{{/collaborator}}'.format(user=user),
        u'comments_url': u'https://api.github.com/repos/{user}/mock-repo/comments{{/number}}'.format(user=user),
        u'commits_url': u'https://api.github.com/repos/{user}/mock-repo/commits{{/sha}}'.format(user=user),
        u'compare_url': u'https://api.github.com/repos/{user}/mock-repo/compare/{{base}}...{{head}}',
        u'contents_url': u'https://api.github.com/repos/{user}/mock-repo/contents/{{+path}}'.format(user=user),
        u'contributors_url': u'https://api.github.com/repos/{user}/mock-repo/contributors'.format(user=user),
        u'created_at': u'2013-06-30T18:29:18Z',
        u'default_branch': u'dev',
        u'description': u'Simple, Pythonic, text processing--Sentiment analysis, part-of-speech tagging, noun phrase extraction, translation, and more.',
        u'downloads_url': u'https://api.github.com/repos/{user}/mock-repo/downloads'.format(user=user),
        u'events_url': u'https://api.github.com/repos/{user}/mock-repo/events'.format(user=user),
        u'fork': False,
        u'forks': 89,
        u'forks_count': 89,
        u'forks_url': u'https://api.github.com/repos/{user}/mock-repo/forks',
        u'full_name': u'{user}/mock-repo',
        u'git_commits_url': u'https://api.github.com/repos/{user}/mock-repo/git/commits{{/sha}}'.format(user=user),
        u'git_refs_url': u'https://api.github.com/repos/{user}/mock-repo/git/refs{{/sha}}'.format(user=user),
        u'git_tags_url': u'https://api.github.com/repos/{user}/mock-repo/git/tags{{/sha}}'.format(user=user),
        u'git_url': u'git://github.com/{user}/mock-repo.git'.format(user=user),
        u'has_downloads': True,
        u'has_issues': True,
        u'has_wiki': True,
        u'homepage': u'https://mock-repo.readthedocs.org/',
        u'hooks_url': u'https://api.github.com/repos/{user}/mock-repo/hooks'.format(user=user),
        u'html_url': u'https://github.com/{user}/mock-repo'.format(user=user),
        u'id': 11075275,
        u'issue_comment_url': u'https://api.github.com/repos/{user}/mock-repo/issues/comments/{{number}}'.format(user=user),
        u'issue_events_url': u'https://api.github.com/repos/{user}/mock-repo/issues/events{{/number}}'.format(user=user),
        u'issues_url': u'https://api.github.com/repos/{user}/mock-repo/issues{{/number}}'.format(user=user),
        u'keys_url': u'https://api.github.com/repos/{user}/mock-repo/keys{{/key_id}}'.format(user=user),
        u'labels_url': u'https://api.github.com/repos/{user}/mock-repo/labels{{/name}}'.format(user=user),
        u'language': u'Python',
        u'languages_url': u'https://api.github.com/repos/{user}/mock-repo/languages'.format(user=user),
        u'master_branch': u'dev',
        u'merges_url': u'https://api.github.com/repos/{user}/mock-repo/merges'.format(user=user),
        u'milestones_url': u'https://api.github.com/repos/{user}/mock-repo/milestones{{/number}}'.format(user=user),
        u'mirror_url': None,
        u'name': u'mock-repo',
        u'network_count': 89,
        u'notifications_url': u'https://api.github.com/repos/{user}/mock-repo/notifications{{?since,all,participating}}'.format(user=user),
        u'open_issues': 2,
        u'open_issues_count': 2,
        u'owner': {u'avatar_url': u'https://gravatar.com/avatar/c74f9cfd7776305a82ede0b765d65402?d=https%3A%2F%2Fidenticons.github.com%2F3959fe3bcd263a12c28ae86a66ec75ef.png&r=x',
                   u'events_url': u'https://api.github.com/users/{user}/events{{/privacy}}'.format(user=user),
                   u'followers_url': u'https://api.github.com/users/{user}/followers'.format(user=user),
                   u'following_url': u'https://api.github.com/users/{user}/following{{/other_user}}'.format(user=user),
                   u'gists_url': u'https://api.github.com/users/{user}/gists{{/gist_id}}'.format(user=user),
                   u'gravatar_id': u'c74f9cfd7776305a82ede0b765d65402',
                   u'html_url': u'https://github.com/{user}'.format(user=user),
                   u'id': 2379650,
                   u'login': user,
                   u'organizations_url': u'https://api.github.com/users/{user}/orgs'.format(user=user),
                   u'received_events_url': u'https://api.github.com/users/{user}/received_events',
                   u'repos_url': u'https://api.github.com/users/{user}/repos'.format(user=user),
                   u'site_admin': False,
                   u'starred_url': u'https://api.github.com/users/{user}/starred{{/owner}}{{/repo}}',
                   u'subscriptions_url': u'https://api.github.com/users/{user}/subscriptions'.format(user=user),
                   u'type': u'User',
                   u'url': u'https://api.github.com/users/{user}'.format(user=user)},
        u'private': private,
        u'pulls_url': u'https://api.github.com/repos/{user}/mock-repo/pulls{{/number}}'.format(user=user),
        u'pushed_at': u'2013-12-30T16:05:54Z',
        u'releases_url': u'https://api.github.com/repos/{user}/mock-repo/releases{{/id}}'.format(user=user),
        u'size': 8717,
        u'ssh_url': u'[email protected]:{user}/mock-repo.git'.format(user=user),
        u'stargazers_count': 1469,
        u'stargazers_url': u'https://api.github.com/repos/{user}/mock-repo/stargazers'.format(user=user),
        u'statuses_url': u'https://api.github.com/repos/{user}/mock-repo/statuses/{{sha}}'.format(user=user),
        u'subscribers_count': 86,
        u'subscribers_url': u'https://api.github.com/repos/{user}/mock-repo/subscribers'.format(user=user),
        u'subscription_url': u'https://api.github.com/repos/{user}/mock-repo/subscription'.format(user=user),
        u'svn_url': u'https://github.com/{user}/mock-repo'.format(user=user),
        u'tags_url': u'https://api.github.com/repos/{user}/mock-repo/tags'.format(user=user),
        u'teams_url': u'https://api.github.com/repos/{user}/mock-repo/teams'.format(user=user),
        u'trees_url': u'https://api.github.com/repos/{user}/mock-repo/git/trees{{/sha}}'.format(user=user),
        u'updated_at': u'2014-01-12T21:23:50Z',
        u'url': u'https://api.github.com/repos/{user}/mock-repo'.format(user=user),
        u'watchers': 1469,
        u'watchers_count': 1469,
        # NOTE: permissions are only available if authorized on the repo
        'permissions': {
            'push': True
        }
    })
    github_mock.branches.return_value = [
        Branch.from_json({u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'.format(user=user)},
                          u'name': u'dev'}),
        Branch.from_json({u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'.format(user=user)},
                          u'name': u'master'}),
        Branch.from_json({u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'.format(user=user)},
                          u'name': u'no-bundle'})
    ]
    # http://developer.github.com/v3/repos/contents/
    github_mock.contents.return_value = {
        'octokit.rb': github3.repos.contents.Contents.from_json({
            "type": "file",
            "size": 625,
            "name": u"\xf0octokit.rb",
            "path": u"\xf0octokit.rb",
            "sha": "fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b",
            "url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
            "git_url": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
            "html_url": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb",
            "_links": {
                "self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
                "git": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
                "html": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb"
            }
        }),
        'octokit': github3.repos.contents.Contents.from_json({
            "type": "dir",
            "size": 0,
            "name": u"\xf0octokit",
            "path": u"\xf0octokit",
            "sha": "a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
            "url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
            "git_url": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
            "html_url": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user),
            "_links": {
                "self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
                "git": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d".format(user=user),
                "html": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user)
            }
        })
    }
    github_mock.tree.return_value = github3.git.Tree.from_json({
        'url': u'https://api.github.com/repos/{user}/mock-repo/git/trees/dev'.format(user=user),
        'sha': 'dev',
        'tree': [
            {u'mode': u'100644', u'path': u'coveragerc',
             u'sha': u'92029ff5ce192425d346b598d7e7dd25f5f05185', u'size': 245,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/92029ff5ce192425d346b598d7e7dd25f5f05185'.format(user=user)},
            {u'mode': u'100644', u'path': u'.gitignore',
             u'sha': u'972ac8aeb0e652642b042064c835f27419e197b4', u'size': 520,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/972ac8aeb0e652642b042064c835f27419e197b4'.format(user=user)},
            {u'mode': u'100644', u'path': u'.travis.yml',
             u'sha': u'86e1fef2834cc2682e753f3ed26ab3c2e100478c', u'size': 501,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/86e1fef2834cc2682e753f3ed26ab3c2e100478c'.format(user=user)}
        ]
    })
    github_mock.commits.return_value = [
        {
            'sha': '12345',
            'name': 'authname',
            'email': 'authmail',
            'date': 'yesterday',
        }
    ]
    return github_mock
7eaffcc7bc22657eaf3c3e7d41d9492300128a73
3,653,984
def top_9(limit=21):
    """Return the given number of books (21 by default).

    The result is a list whose elements are of the form
    [book_id, authors, title, original_publication_year, average_rating, image_url].
    """
    cur.execute(
        """SELECT book_id, authors, title, original_publication_year,
                  average_rating, image_url
           FROM books
           ORDER BY average_rating DESC
           LIMIT %s
        """, [limit])
    najboljsi = cur.fetchall()
    # Return the list as described in the function's documentation:
    return najboljsi
ec87714ea5925c5d4115b0ee091597f7ffb1c323
3,653,986
def inverseTranslateTaps(lowerTaps, pos):
    """Method to translate tap integer in range
    [-lower_taps, raise_taps] to range [0, lowerTaps + raiseTaps]
    """
    # Hmmm... is it this simple?
    posOut = pos + lowerTaps
    return posOut
827bdfc51b3581b7b893ff8ff02dd5846ff6cd0f
3,653,988
def GMLstring2points(pointstring):
    """Convert list of points in string to a list of points. Works for 3D points."""
    listPoints = []
    # -- List of coordinates
    coords = pointstring.split()
    # -- Store the coordinate tuple
    assert len(coords) % 3 == 0
    for i in range(0, len(coords), 3):
        listPoints.append([float(coords[i]),
                           float(coords[i + 1]),
                           float(coords[i + 2])])
    return listPoints
e755d344d163bdcdb114d0c9d614a1bbd40be29f
3,653,989
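Example input/output for `GMLstring2points` above: six coordinates become two 3D points.

GMLstring2points("0.0 0.0 0.0 1.5 2.5 3.5")
# [[0.0, 0.0, 0.0], [1.5, 2.5, 3.5]]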
def set_(key, value, service=None, profile=None):  # pylint: disable=W0613
    """
    Set a key/value pair in the etcd service
    """
    client = _get_conn(profile)
    client.set(key, value)
    return get(key, service, profile)
c9689c53dc837caf182ac0b5d0e8552888ec70e9
3,653,990
import numpy as np


def compute_cw_score_normalized(p, q, edgedict, ndict, params=None):
    """
    Computes the common weighted normalized score between p and q

    @param p        -> A node of the graph
    @param q        -> Another node in the graph
    @param edgedict -> A dictionary with key `(p, q)` and value `w`.
    @param ndict    -> A dictionary with key `p` and the value a set `{p1, p2, ...}`
    @param params   -> Must contain a "deg" dictionary of node degrees
    @return         -> A real value representing the score
    """
    if len(ndict[p]) > len(ndict[q]):
        p, q = q, p  # iterate over the smaller neighborhood
    score = 0
    for elem in ndict[p]:
        if elem in ndict[q]:
            p_elem = edgedict[(p, elem)] if (p, elem) in edgedict else edgedict[(elem, p)]
            q_elem = edgedict[(q, elem)] if (q, elem) in edgedict else edgedict[(elem, q)]
            score += p_elem + q_elem
    degrees = params["deg"]
    return score / np.sqrt(degrees[p] * degrees[q])
7769bc21d6a6bf176002ea6f4020cbe78f971b84
3,653,991
def prompt_user_friendly_choice_list(msg, a_list, default=1, help_string=None):
    """Prompt user to select from a list of possible choices.

    :param msg: A message displayed to the user before the choice list
    :type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
    :param default: The default option that should be chosen if user doesn't enter a choice
    :type default: int
    :returns: The list index of the item chosen.
    """
    verify_is_a_tty()
    options = '\n'.join([' [{}] {}{}'
                         .format(i + 1,
                                 x['name'] if isinstance(x, dict) and 'name' in x else x,
                                 ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
                         for i, x in enumerate(a_list)])
    allowed_vals = list(range(1, len(a_list) + 1))
    linesToDelete = len(a_list) + 1
    while True:
        val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
        if val == '?' and help_string is not None:
            for x in range(0, linesToDelete):
                delete_last_line()
            print('Please enter a choice [Default choice({})]: {}'.format(default, '?'))
            print(help_string)
            continue
        if not val:
            val = '{}'.format(default)
        try:
            ans = int(val)
            if ans in allowed_vals:
                for x in range(0, linesToDelete):
                    delete_last_line()
                print('Please enter a choice [Default choice({})]: {}'.format(default, a_list[ans - 1]))
                # array index is 0-based, user input is 1-based
                return ans - 1
            raise ValueError
        except ValueError:
            for x in range(0, linesToDelete):
                delete_last_line()
            print('Please enter a choice [Default choice({})]: {}'.format(default, val))
            logger.warning('Valid values are %s', allowed_vals)
d2c81b8af3f2de3203dd8cfd11372909e2e9cbe3
3,653,992
import typing
import json


def fuse(search: typing.Dict, filepath: str):
    """Build a JSON doc of your pages"""
    with open(filepath, "w") as jsonfile:
        return json.dump(
            [x for x in _build_index(search, id_field="id")],
            fp=jsonfile,
        )
542aff31a2861bc8de8a25025582305db0ce2af1
3,653,993
def aggregate_gradients_using_copy_with_device_selection(
        tower_grads, avail_devices, use_mean=True, check_inf_nan=False):
    """Aggregate gradients, controlling device for the aggregation.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over towers. The inner list is over individual gradients.
        avail_devices: List of devices to cycle through when placing the
            per-gradient aggregation ops.
        use_mean: if True, mean is taken, else sum of gradients is taken.
        check_inf_nan: If true, check grads for nans and infs.

    Returns:
        The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
        gradient has been averaged across all towers. The variable is chosen from
        the first tower. The has_nan_or_inf indicates the grads has nan or inf.
    """
    agg_grads = []
    has_nan_or_inf_list = []
    for i, single_grads in enumerate(zip(*tower_grads)):
        with tf.device(avail_devices[i % len(avail_devices)]):
            grad_and_var, has_nan_or_inf = aggregate_single_gradient(
                single_grads, use_mean, check_inf_nan)
            agg_grads.append(grad_and_var)
            has_nan_or_inf_list.append(has_nan_or_inf)
    return agg_grads
24d12a4e3ee63b96dd453bc901a9180ed956003b
3,653,994
def ToOrdinal(value):
    """
    Convert a numerical value into an ordinal number.

    @param value: the number to be converted
    """
    if value % 100 // 10 != 1:
        if value % 10 == 1:
            ordval = '{}st'.format(value)
        elif value % 10 == 2:
            ordval = '{}nd'.format(value)
        elif value % 10 == 3:
            ordval = '{}rd'.format(value)
        else:
            ordval = '{}th'.format(value)
    else:
        ordval = '{}th'.format(value)
    return ordval
774bac5fd22714ba3eb4c9dd2b16f4236e2f5e8c
3,653,995
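The `value % 100 // 10 != 1` guard in `ToOrdinal` above handles the teens, which always take 'th':

[ToOrdinal(n) for n in (1, 2, 3, 11, 12, 13, 21, 103)]
# ['1st', '2nd', '3rd', '11th', '12th', '13th', '21st', '103rd']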
from typing import List


def compute_partition(num_list: List[int]):
    """Compute partitions that add up."""
    solutions = []
    for bits in helper.bitprod(len(num_list)):
        iset = []
        oset = []
        for idx, val in enumerate(bits):
            (iset.append(num_list[idx]) if val == 0 else
             oset.append(num_list[idx]))
        if sum(iset) == sum(oset):
            solutions.append(bits)
    return solutions
408f5f85b1648facdcebfd47f96f53221b54888e
3,653,996
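`compute_partition` above relies on `helper.bitprod`, which is not part of the row. A plausible stand-in (an assumption, not the project's code) enumerates all bit vectors of a given length:

import itertools

def bitprod(n):  # hypothetical equivalent of helper.bitprod
    return itertools.product((0, 1), repeat=n)

# With that assumption, compute_partition([1, 2, 3]) returns
# [(0, 0, 1), (1, 1, 0)]: the two orientations of the split {1, 2} | {3}.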
def renumber(conllusent):
    """Fix non-contiguous IDs because of multiword tokens or removed tokens"""
    mapping = {line[ID]: n for n, line in enumerate(conllusent, 1)}
    mapping[0] = 0
    for line in conllusent:
        line[ID] = mapping[line[ID]]
        line[HEAD] = mapping[line[HEAD]]
    return conllusent
30f336cd63e7aff9652e6e3d1a35a21dc3379c99
3,653,997
def recall_at(target, scores, k):
    """Calculation for recall at k."""
    if target in scores[:k]:
        return 1.0
    else:
        return 0.0
0c3f70be3fb4cfde16d5e39b256e565f180d1655
3,653,998
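Minimal examples for `recall_at` above:

recall_at('dog', ['cat', 'dog', 'fish'], k=2)  # 1.0 -- target inside the top 2
recall_at('dog', ['cat', 'fish', 'dog'], k=2)  # 0.0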
def supports_dynamic_state() -> bool:
    """Checks if the state can be displayed with widgets.

    :return: True if widgets available. False otherwise.
    """
    return widgets is not None
bee2f32bb315f086b6bd8b75535eb8fdde36a188
3,653,999
def create_partial_image_rdd_decoder(key_type):
    """Creates a partial, tuple decoder function.

    Args:
        key_type (str): The type of the key in the tuple.

    Returns:
        A partial :meth:`~geopyspark.protobufregistry.ProtoBufRegistry.image_rdd_decoder`
        function that requires ``proto_bytes`` to execute.
    """
    return partial(image_rdd_decoder, key_decoder=key_type)
2df5c506cf8603e9e4acbb4ccb77f8f5d830fe82
3,654,000
import json def to_json(simple_object): """ Serializes the ``simple_object`` to JSON using the EnhancedJSONEncoder above. """ return json.dumps(simple_object, cls=EnhancedJSONEncoder)
c9f8c9474210661a7b63924a72442014c831e170
3,654,001
def bootstrap_metadata(): """ Provides cluster metadata which includes security modes """ return _metadata_helper('bootstrap-config.json')
ec2294606446a9b78a603826fca6f447ed2d9bb9
3,654,002
def unique_slug(*, title: str, new_slug: str = None) -> str:
    """Create a unique slug.

    Args:
        title: The text from which the slug will be generated.
        new_slug: Custom slug to hard-code.

    Returns:
        The created slug, or the hard-coded slug.
    """
    if new_slug is None:
        slug = slugify(title)
        new_slug = f"{slug}-{random_string()}"
    return new_slug
b4e119502edf144f8393b38a47e3fbeb25335aff
3,654,003
import math def dcg(r, k=None): """The Burges et al. (2005) version of DCG. This is what everyone uses (except trec_eval) :param r: results :param k: cut-off :return: sum (2^ y_i - 1) / log (i +2) """ result = sum([(pow(2, rel) - 1) / math.log(rank + 2, 2) for rank, rel in enumerate(r[:k])]) return result
d93c500ba55411807570c8efebdeaa49ce7fe288
3,654,004
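A worked example: for relevance grades r = [3, 2, 0], the score is (2^3 - 1)/log2(2) + (2^2 - 1)/log2(3) + 0 = 7 + 3/1.585 ≈ 8.893:

>>> round(dcg([3, 2, 0]), 3)
8.893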
async def detect_objects(computervision_client, image_url):
    """Detect objects from a remote image"""
    detect_objects_results = computervision_client.detect_objects(image_url)
    return detect_objects_results.objects
9adb2a3b2c08f99187159ad6a22047bbf3d4c30a
3,654,005
def rgb(r=None, g=None, b=None, smooth=True, force=True):
    """
    Set RGB values with PWM signal
    :param r: red value 0-1000
    :param g: green value 0-1000
    :param b: blue value 0-1000
    :param smooth: transition to the new color smoothly
    :param force: clear fade generators and set the color immediately
    :return: verdict string
    """

    def __buttery(r_from, g_from, b_from, r_to, g_to, b_to):
        step_ms = 2
        interval_sec = 0.3
        if Data.RGB_CACHE[3] == 0:
            # Turning on from the OFF state: fade from black to the target color
            r_from, g_from, b_from = 0, 0, 0
            Data.RGB_CACHE[3] = 1
        r_gen = transition(from_val=r_from, to_val=r_to, step_ms=step_ms, interval_sec=interval_sec)
        g_gen = transition(from_val=g_from, to_val=g_to, step_ms=step_ms, interval_sec=interval_sec)
        b_gen = transition(from_val=b_from, to_val=b_to, step_ms=step_ms, interval_sec=interval_sec)
        for _r in r_gen:
            Data.RGB_OBJS[0].duty(_r)
            Data.RGB_OBJS[1].duty(g_gen.__next__())
            Data.RGB_OBJS[2].duty(b_gen.__next__())
            sleep_ms(step_ms)

    __RGB_init()
    if force and Data.FADE_OBJS[0]:
        Data.FADE_OBJS = (None, None, None)
    # Dynamic input handling: user/cache
    r = Data.RGB_CACHE[0] if r is None else r
    g = Data.RGB_CACHE[1] if g is None else g
    b = Data.RGB_CACHE[2] if b is None else b
    # Set RGB channels
    if smooth:
        __buttery(r_from=Data.RGB_CACHE[0], g_from=Data.RGB_CACHE[1], b_from=Data.RGB_CACHE[2],
                  r_to=r, g_to=g, b_to=b)
    else:
        Data.RGB_OBJS[0].duty(int(r))
        Data.RGB_OBJS[1].duty(int(g))
        Data.RGB_OBJS[2].duty(int(b))
    # Save channel duties if LED on
    if r > 0 or g > 0 or b > 0:
        Data.RGB_CACHE = [r, g, b, 1]
    else:
        Data.RGB_CACHE[3] = 0
    # Save state machine (cache)
    __persistent_cache_manager('s')
    return status()
5d5455a785a719e5252c3333e45c06352e8769ed
3,654,006
from .core.configs import get_configs
from .core.configs import set_configs
from .core.configs import del_configs


def handle_config(args, configs):
    """Handle the `config` subcommand

    :param args: parsed arguments
    :type args: `argparse.Namespace`
    :param configs: configurations object
    :type configs: ``sfftk.core.configs.Configs``
    :return int status: status
    """
    if args.config_subcommand == "get":
        return get_configs(args, configs)
    elif args.config_subcommand == "set":
        return set_configs(args, configs)
    elif args.config_subcommand == "del":
        return del_configs(args, configs)
60fb9b289e99369a9f83bdf675a793fe85191257
3,654,007
from typing import Dict
from typing import List


def filter_safe_actions(
    action_shield: Dict[int, Dict[ActionData, int]], energy: int, bel_supp_state: int
) -> List[ActionData]:
    """Utility function to filter actions by the energy they require, using the given action shield.

    Parameters
    ----------
    action_shield : Dict[int, Dict[ActionData, int]]
        Mapping from belief support states to actions and the minimum energy each action requires.
    energy : int
        Available energy.
    bel_supp_state : int
        State in belief support cmdp to filter actions by.

    Returns
    -------
    List[ActionData]
        List of available actions for given energy and given belief support cmdp state.
    """
    return [
        action
        for action, min_energy in action_shield[bel_supp_state].items()
        if min_energy <= energy
    ]
e62b24233792d4decca1bd853b6344a8541882be
3,654,008
def fcard(card): """Create format string for card display""" return f"{card[0]} {card[1]}"
ca3866011b418bf35e1b076afd7134926a9382f9
3,654,010
def resetChapterProgress(chapterProgressDict, chapter, initRepeatLevel):
    """This method resets chapter progress and sets the initial level for the repeat routine.

    Args:
        chapterProgressDict (dict): Chapter progress data.
        chapter (int): Number of the chapter.
        initRepeatLevel (int): Initial level for the repeat routine.

    Returns:
        dictionary: The reset chapter progress dictionary with the initial level set.
    """
    chapterProgressDict[chapter]["status"] = "Not started"
    chapterProgressDict[chapter]["progress"]["current"] = 0
    chapterProgressDict[chapter]["correct"] = {"correct": 0, "subtotal": 0, "rate": ''}
    chapterProgressDict[chapter]["repeatLevel"] = initRepeatLevel
    return chapterProgressDict
e02d6e97f556a2c080c2bc273255aacedf7bb086
3,654,011
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE ***"
    priorityqueue = util.PriorityQueue()
    priorityqueue.push(
        (problem.getStartState(), [], 0),
        heuristic(problem.getStartState(), problem)
    )
    checkedstates = []

    while not priorityqueue.isEmpty():
        state, actions, curCost = priorityqueue.pop()

        if problem.isGoalState(state):
            return actions

        if state not in checkedstates:
            checkedstates.append(state)
            for point, direction, cost in problem.getSuccessors(state):
                g = curCost + cost
                sum_g_heuristic = g + heuristic(point, problem)
                priorityqueue.push((point, actions + [direction], g), sum_g_heuristic)
    # util.raiseNotDefined()
0d84de971424d82020b48e35443bfe92cc2665d0
3,654,012
import numpy as np


def gen_flag(p1=0.5, **_args):
    """
    Generates a flag.

    :param p1: probability of flag = 1
    :param _args:
    :return: flag
    """
    # A uniform draw matches the documented semantics (P(flag=1) == p1);
    # the original normal(0, 100) draw did not.
    return 1 if np.random.uniform(0, 100) <= p1 * 100 else 0
0accef3f2fd03c4918b52db2f6c72d405243af87
3,654,013
from typing import OrderedDict import re def load_migrations(path): """ Given a path, load all migrations in that path. :param path: path to look for migrations in :type path: pathlib.Path or str """ migrations = OrderedDict() r = re.compile(r'^[0-9]+\_.+\.py$') filtered = filter( lambda x: r.search(x) is not None, [x.name for x in path.iterdir()] ) for migration in sorted(filtered): migration_id = migration[:-3] migrations[migration_id] = { 'id': migration_id, 'commit_time': None, 'status': 0, 'module': load_module(migration_id, path / migration), 'table': None } return migrations
5bd761e6a9ffab4aa08a473e8d2c667e7fb87813
3,654,014
def filterkey(e, key, ns=False):
    """
    Returns a list of the C{key} attribute values taken from the list C{e}.

    B{Example 1}: extracting the SRS from a list of C{dict}s.

    >>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
    >>> key = "SRS"
    >>> filterkey(e, key)
    ['12345', '54321']

    B{Example 2}: extracting the name from a list of C{dict}s.

    >>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
    >>> key = "Name"
    >>> filterkey(e, key)
    ['WGS-1', 'WGS-2']

    @param e: list
    @type e: list
    @param key: key
    @type key: str
    @param ns: whether L{_helpers.ns} should additionally be applied
    @type ns: bool
    @return: list of the C{key} attribute values found
    @rtype: list
    """
    l = []
    key_split = key.split("=")
    if isinstance(e, list):
        for i in e:
            if len(key_split) > 1:
                if i[key_split[0]] == key_split[1]:
                    if ns:
                        l.append(_ns(i[key_split[0]]))
                    else:
                        l.append(i[key_split[0]])
            else:
                if ns:
                    l.append(_ns(i[key]))
                else:
                    l.append(i[key])
    return l
f56967e9623622d2dffdb9fe6f128893df9ad798
3,654,015
def on_coordinator(f): """A decorator that, when applied to a function, makes a spawn of that function happen on the coordinator.""" f.on_coordinator = True return f
d9c97c47255d165c67a4eb67a18cc85c3c9b9386
3,654,018
def redirect_subfeed(match): """ URL migration: my site used to have per-category/subcategory RSS feeds as /category/path/rss.php. Many of the categories have Path-Aliases in their respective .cat files, but some of the old subcategories no longer apply, so now I just bulk-redirect all unaccounted-for subcategories to the top-level category's feed. """ return flask.url_for( 'category', category=match.group(1), template='feed'), True
101509cbf75ce9be5307edd265988cca662a7880
3,654,019
from typing import List
from typing import Tuple
import random

import pandas as pd


def _train_test_split_dataframe_strafified(df:pd.DataFrame, split_cols:List[str], test_ratio:float=0.2, verbose:int=0, **kwargs) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """ ref. the function `train_test_split_dataframe`
    """
    # work on a copy so the caller's dataframe is not mutated via a view
    df_inspection = df[split_cols].copy()
    for item in split_cols:
        all_entities = df_inspection[item].unique().tolist()
        entities_dict = {e: str(i) for i, e in enumerate(all_entities)}
        df_inspection[item] = df_inspection[item].apply(lambda e:entities_dict[e])

    # a column name long enough that it cannot collide with any of split_cols
    inspection_col_name = "Inspection" * (max([len(c) for c in split_cols])//10+1)
    df_inspection[inspection_col_name] = df_inspection.apply(
        func=lambda row: "-".join(row.values.tolist()),
        axis=1
    )
    item_names = df_inspection[inspection_col_name].unique().tolist()
    item_indices = {
        n: df_inspection.index[df_inspection[inspection_col_name]==n].tolist()
        for n in item_names
    }
    if verbose >= 1:
        print("item_names = {}".format(item_names))

    for n in item_names:
        random.shuffle(item_indices[n])

    test_indices = []
    for n in item_names:
        item_test_indices = item_indices[n][:int(round(test_ratio*len(item_indices[n])))]
        test_indices += item_test_indices
        if verbose >= 2:
            print("for the item `{}`, len(item_test_indices) = {}".format(n, len(item_test_indices)))

    df_test = df.loc[df.index.isin(test_indices)].reset_index(drop=True)
    df_train = df.loc[~df.index.isin(test_indices)].reset_index(drop=True)

    return df_train, df_test
82c4f2a1da3d7e7a4a854c037ab209dffe01f5b2
3,654,020
def index():
    """ This route will render a template.
        If a query string comes into the URL, it will return a parsed
        dictionary of the query string keys & values, using request.args
    """
    args = None
    if request.args:
        args = request.args
    return render_template("public/index.html", args=args)
ae76de55fb9263264d87447fc2fe173ad62e3245
3,654,021
from typing import Type from typing import List def multi_from_dict(cls: Type[T_FromDict], data: dict, key: str) -> List[T_FromDict]: """ Converts {"foo": [{"bar": ...}, {"bar": ...}]} into list of objects using the cls.from_dict method. """ return [cls.from_dict(raw) for raw in data.get(key, [])]
44b9aa28d93f24cc76cddf3cba9fbcbcde937d3d
3,654,022
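A minimal sketch of the expected usage, with a hypothetical Item class supplying the from_dict constructor the helper relies on:

class Item:
    def __init__(self, bar: int):
        self.bar = bar

    @classmethod
    def from_dict(cls, data: dict) -> "Item":
        return cls(bar=data["bar"])

items = multi_from_dict(Item, {"foo": [{"bar": 1}, {"bar": 2}]}, key="foo")
# -> two Item instances; a missing or empty "foo" key yields []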
import inspect def infer_signature(func, class_name=''): """Decorator that infers the signature of a function.""" # infer_method_signature should be idempotent if hasattr(func, '__is_inferring_sig'): return func assert func.__module__ != infer_method_signature.__module__ try: funcfile = get_defining_file(func) funcsource, sourceline = inspect.getsourcelines(func) sourceline -= 1 # getsourcelines is apparently 1-indexed except: return func funcid = (class_name, func.__name__, funcfile, sourceline) func_source_db[funcid] = ''.join(funcsource) try: func_argid_db[funcid] = getfullargspec(func) vargs_name, kwargs_name = func_argid_db[funcid][1], func_argid_db[funcid][2] except TypeError: # Not supported. return func def wrapper(*args, **kwargs): global is_performing_inference # If we're already doing inference, we should be in our own code, not code we're checking. # Not doing this check sometimes results in infinite recursion. if is_performing_inference: return func(*args, **kwargs) expecting_type_error, got_type_error, got_exception = False, False, False is_performing_inference = True try: callargs = getcallargs(func, *args, **kwargs) # we have to handle *args and **kwargs separately if vargs_name: va = callargs.pop(vargs_name) if kwargs_name: kw = callargs.pop(kwargs_name) arg_db = {arg: infer_value_type(value) for arg, value in callargs.items()} # *args and **kwargs need to merge the types of all their values if vargs_name: arg_db[vargs_name] = union_many_types(*[infer_value_type(v) for v in va]) if kwargs_name: arg_db[kwargs_name] = union_many_types(*[infer_value_type(v) for v in kw.values()]) except TypeError: got_exception = expecting_type_error = True except: got_exception = True finally: is_performing_inference = False try: ret = func(*args, **kwargs) except TypeError: got_type_error = got_exception = True raise except: got_exception = True raise finally: if not got_exception: assert not expecting_type_error # if we didn't get a TypeError, update the actual database for arg, t in arg_db.items(): update_db(func_arg_db, (funcid, arg), t) # if we got an exception, we don't have a ret if not got_exception: is_performing_inference = True try: type = infer_value_type(ret) update_db(func_return_db, funcid, type) except: pass finally: is_performing_inference = False return ret if hasattr(func, '__name__'): wrapper.__name__ = func.__name__ wrapper.__is_inferring_sig = True return wrapper
e1ab5d9850b4a3026ecd563324026c9cd1675d31
3,654,023
def field_wrapper(col): """Helper function to dynamically create list display method for :class:`ViewProfilerAdmin` to control value formating and sort order. :type col: :data:`settings.ReportColumnFormat` :rtype: function """ def field_format(obj): return col.format.format(getattr(obj, col.attr_name)) field_format.short_description = col.name field_format.admin_order_field = col.attr_name return field_format
0a41b5462a6905af5d1f0cc8b9f2bdd00206e6bd
3,654,024
def is_isotropic(value): """ Determine whether all elements of a value are equal """ if hasattr(value, '__iter__'): return np.all(value[1:] == value[:-1]) else: return True
9c99855d53cf129931c9a9cb51fc491d2fe0df21
3,654,025
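Behavior sketch (hypothetical calls): neighbors are compared element-wise for iterables, while scalars are trivially isotropic:

>>> import numpy as np
>>> bool(is_isotropic(np.array([2, 2, 2])))
True
>>> bool(is_isotropic(np.array([1, 2, 2])))
False
>>> is_isotropic(5)
True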
from itertools import zip_longest


def grouper(iterable, n, fillvalue=None):
    """Iterate over a given iterable in n-size groups."""
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
26adffa4a3c748defe3732d5b94358dda20d095c
3,654,026
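This is the classic itertools grouper recipe; an incomplete trailing group is padded with fillvalue:

>>> list(grouper('ABCDEFG', 3, fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]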
def format_gro_coord(resid, resname, aname, seqno, xyz):
    """ Print a line in accordance with .gro file format, with nine decimal points
    of precision.  Nine decimal points are necessary to get forces below 1e-3 kJ/mol/nm.

    @param[in] resid The number of the residue that the atom belongs to
    @param[in] resname The name of the residue that the atom belongs to
    @param[in] aname The name of the atom
    @param[in] seqno The sequential number of the atom
    @param[in] xyz A 3-element array containing x, y, z coordinates of that atom
    """
    return "%5i%-5s%5s%5i % 13.9f % 13.9f % 13.9f" % (resid, resname, aname, seqno, xyz[0], xyz[1], xyz[2])
ceeeeeafe4f7484fa17ee4ebd79363209c8f7391
3,654,027
from functools import wraps


def return_as_list(ignore_nulls: bool = False):
    """
    Enables you to write list-returning functions using a decorator. Example:

    >>> def make_a_list(lst):
    >>>     output = []
    >>>     for item in lst:
    >>>         output.append(item)
    >>>     return output

    Is equivalent to:

    >>> @return_as_list()
    >>> def make_a_list(lst):
    >>>     for item in lst:
    >>>         yield item

    Essentially syntactic sugar for @for_argument(returns=list)

    :param ignore_nulls: if True, then if your function yields None, it won't be appended.
    """
    def outer(fun):
        @wraps(fun)
        def inner(*args, **kwargs):
            output = []
            for item in fun(*args, **kwargs):
                if item is None and ignore_nulls:
                    continue
                output.append(item)
            return output
        return inner
    return outer
5f5f089e5664ffbbd5d78a71bf984909e677bcc5
3,654,028
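A usage sketch (hypothetical function) showing the ignore_nulls filtering:

@return_as_list(ignore_nulls=True)
def evens_only(lst):
    for item in lst:
        yield item if item % 2 == 0 else None

evens_only([1, 2, 3, 4])  # -> [2, 4]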
def assemble_insert_msg_content(row, column, digit): """Assemble a digit insertion message.""" return str(row) + CONTENT_SEPARATOR + str(column) + CONTENT_SEPARATOR + str(digit)
5c4a40aedf4569a8f12793356c2cbedecf32d839
3,654,030
from typing import Union

import numpy as np
import pandas as pd


def get_difference_of_means(uni_ts: Union[pd.Series, np.ndarray]) -> np.float64:
    """
    :return: The absolute difference between the means of the first and the second halves of a
        given univariate time series.
    """
    mid = int(len(uni_ts) / 2)
    return np.abs(get_mean(uni_ts[:mid]) - get_mean(uni_ts[mid:]))
5e7e709afffde843f3f1be7941c620d4f248e8b4
3,654,031
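Assuming the elided get_mean helper behaves like np.mean, the halves of [1, 1, 5, 5] have means 1 and 5:

>>> float(get_difference_of_means(np.array([1, 1, 5, 5])))
4.0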
def user_collection():
    """
    User collection page
    1. Get parameters
        - current page
    2. Return data
        - current page
        - total number of pages
        - items for the current page
    :return:
    """
    user = g.user
    if not user:
        return "Please log in first"

    # Get the current page
    page = request.args.get('p', 1)
    page_show = constants.USER_COLLECTION_MAX_NEWS

    # Validate parameters
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        # return jsonify(errno=RET.PARAMERR, errmsg="invalid parameter")
        abort(404)

    try:
        user_collection = user.collection_news.paginate(page, page_show)
        currentPage = user_collection.page
        totalPage = user_collection.pages
        items = user_collection.items
    except Exception as e:
        current_app.logger.error(e)
        # return jsonify(errno=RET.DBERR, errmsg="database query error")
        abort(404)

    user_collection_list = []
    for item in items:
        user_collection_list.append(item.to_review_dict())

    data = {
        'currentPage': currentPage,
        'totalPage': totalPage,
        'user_collection_list': user_collection_list
    }

    return render_template("news/user_collection.html", data=data)
d5ad3a3121dfc169952cad514b3fa930662dd2af
3,654,032
def activity(*, domain, name, version):
    """Decorator that registers a function to `ACTIVITY_FUNCTIONS`
    """
    def function_wrapper(func):
        identifier = '{}:{}:{}'.format(name, version, domain)
        ACTIVITY_FUNCTIONS[identifier] = func
        # return the function unchanged so the decorated name stays callable
        return func

    return function_wrapper
1529c19b8d1d5be02c45e87a446abf62aafc143a
3,654,033
def OEDParser(soup, key):
    """ Parser for the Oxford Learner's Dictionary. """
    rep = DicResult(key)
    rep.defs = parseDefs(soup)
    rep.examples = parseExample(soup)
    return rep
21b4670047e06f1251e70e09aed5da4dba0449ec
3,654,034
def X_SIDE_GAP_THREE_METHODS(data): """ Upside/Downside Gap Three Methods :param pd.DataFrame data: pandas DataFrame with open, high, low, close data :return pd.Series: with indicator data calculation results """ fn = Function('CDLXSIDEGAP3METHODS') return fn(data)
87d3ca966d13756c3cd2bf15647a4d696f1e1f02
3,654,035
def fix_legacy_database_uri(uri): """ Fixes legacy Database uris, like postgres:// which is provided by Heroku but no longer supported by SqlAlchemy """ if uri.startswith('postgres://'): uri = uri.replace('postgres://', 'postgresql://', 1) return uri
aa3aa20110b7575abf77534d08a35dccb04b731d
3,654,036
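Example of the rewrite, with a made-up URI; only the postgres:// scheme is touched:

>>> fix_legacy_database_uri('postgres://user:secret@host:5432/mydb')
'postgresql://user:secret@host:5432/mydb'
>>> fix_legacy_database_uri('sqlite:///app.db')
'sqlite:///app.db'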
def create_need():
    """
    Creates the need in the database
    :return:
    """
    student = Student.query.filter_by(id_user=session['uid']).first()
    title = request.form.get('title')
    description = request.form.get('description')
    speaker_id = request.form.get('speaker_id')
    estimated_tokens = int(request.form.get('estimated_tokens'))
    if estimated_tokens < 0:
        estimated_tokens = 0
    need = Need(
        title=title,
        description=description,
        estimated_tokens=estimated_tokens,
        status='En cours',
        id_assigned_team=student.team.id,
        id_assigned_speaker=speaker_id
    )
    db.session.add(need)
    try:
        db.session.commit()
    except:
        abort(500)
    return redirect(url_for('get_student_dashboard'))
d8af541668818e1ed40382b4ed457ac819ab3ce6
3,654,037
from sklearn.preprocessing import MinMaxScaler


def insert_rolling_mean_columns(data, column_list, window):
    """This function selects the columns of a dataframe according to a provided
    list of strings, re-scales their values and inserts a new column in the
    dataframe with the rolling mean of each variable in the column list and the
    provided window length.

    Params:
        data: original dataframe
        column_list: list of columns to select
        window: window length to calculate rolling mean
    """
    scaler = MinMaxScaler()
    data[column_list] = scaler.fit_transform(data[column_list])

    for var in column_list:
        data[var + "_RollingMean"] = data[var].rolling(window).mean()
    return data
fd37ca307aaa0d755cd59aa69320003d02cb677a
3,654,038
def get_url_names():
    """ Get the links to the content

    Returns:
        Here, a list of files in *.srt format
    """
    files = ['srts/Iron Man02x26.srt', 'srts/Iron1and8.srt']
    return files
4ee8fdd5ab9efc04eda4bfe1205e073064030520
3,654,039
from datetime import datetime def unix_utc_now() -> int: """ Return the number of seconds passed from January 1, 1970 UTC. """ delta = datetime.utcnow() - datetime(1970, 1, 1) return int(delta.total_seconds())
b9768b60cf6f49a7cccedd88482d7a2b21cf05a2
3,654,040
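Note that datetime.utcnow() is deprecated as of Python 3.12; a timezone-aware equivalent sketch (hypothetical helper name):

from datetime import datetime, timezone

def unix_utc_now_aware() -> int:
    # Same epoch-seconds result, built from an aware datetime
    return int(datetime.now(timezone.utc).timestamp())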
from os import environ
from typing import Set


def _preload_specific_vars(env_keys: Set[str]) -> Store:
    """Preloads env vars from environ in the given set."""
    specified = {}

    for env_name, env_value in environ.items():
        if env_name not in env_keys:
            # Skip vars that have not been requested.
            continue
        specified[env_name] = env_value

    return specified
6eb6c09f56235b024f15749d8ec65e8801991b43
3,654,041
import re
from urllib import parse as urlparse


def _format_workflow_id(id):
    """
    Add workflow prefix to and quote a tool ID.

    Args:
        id (str): ...
    """
    id = urlparse.unquote(id)

    if not re.search('^#workflow', id):
        return urlparse.quote_plus('#workflow/{}'.format(id))
    else:
        return urlparse.quote_plus(id)
ea6b6f83ef430128c8a876c9758ce3d70b1bef63
3,654,042
import numpy as np


def calc_log_sum(Vals, sigma):
    """
    Returns the optimal value given the choice specific value functions Vals.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    V : [numpy.array]
        A numpy.array that holds the integrated value function.
    """

    # Assumes that NaNs have been replaced by -numpy.inf or similar
    if sigma == 0.0:
        # We could construct a linear index here and use unravel_index.
        V = np.amax(Vals, axis=0)
        return V

    # else we have a taste shock
    maxV = np.max(Vals, axis=0)

    # calculate maxV + sigma*log(sum_i=1^J exp((V[i]-maxV)/sigma))
    sumexp = np.sum(np.exp((Vals - maxV) / sigma), axis=0)
    LogSumV = np.log(sumexp)
    LogSumV = maxV + sigma * LogSumV
    return LogSumV
18f71725ea4ced0ea5243fb201f25ae636547947
3,654,043
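A worked example: with Vals holding the column vectors [1.0] and [2.0], sigma = 1 gives log(e^1 + e^2) ≈ 2.3133, while sigma = 0 degenerates to the plain maximum:

>>> import numpy as np
>>> vals = np.array([[1.0], [2.0]])
>>> np.round(calc_log_sum(vals, 1.0), 4)
array([2.3133])
>>> calc_log_sum(vals, 0.0)
array([2.])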
def comment_create(request, post_pk):
    """Create a comment on a post"""
    post = get_object_or_404(Post, pk=post_pk)
    form = CommentForm(request.POST or None)

    if request.method == 'POST' and form.is_valid():
        # validate before saving; ModelForm.save() raises on unvalidated data
        comment = form.save(commit=False)
        comment.post = post
        comment.save()
        return redirect('blog:post_detail', pk=post.pk)

    context = {
        'form': form,
        'post': post
    }
    return render(request, 'blog/comment_form.html', context)
fe6357cfcff1a522064ad9f49b030cf63a02b575
3,654,044
import torch


def tensor_from_var_2d_list(target, padding=0.0, max_len=None, requires_grad=True):
    """Convert a variable-length 2-level nested list to a tensor.
       e.g. target = [[1, 2, 3], [4, 5, 6, 7, 8]]
    """
    max_len_calc = max([len(batch) for batch in target])
    if max_len is None:
        max_len = max_len_calc
    if max_len_calc > max_len:
        print("Maximum length exceeded: {}>{}".format(max_len_calc, max_len))
        target = [batch[:max_len] for batch in target]
    padded = [batch + (max_len - len(batch)) * [padding] for batch in target]
    return torch.tensor(padded, requires_grad=requires_grad)
2aa5fcc5b2be683c64026126da55330937cd8242
3,654,045
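Padding example (hypothetical call): the shorter row is right-padded with the default 0.0 up to the longest row:

>>> tensor_from_var_2d_list([[1., 2., 3.], [4., 5.]])
tensor([[1., 2., 3.],
        [4., 5., 0.]], requires_grad=True)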