content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def topLeftToCenter(pointXY, screenXY, flipY=False):
    """Convert a top-left-origin coordinate to a center-origin one.

    Switches from (0,0) as top left to (0,0) as the screen center.

    Parameters
    ----------
    pointXY : tuple
        (x, y) coordinate in the top-left reference frame.
    screenXY : tuple, ints
        (width, height) dimensions of the grid or screen.
    flipY : Bool
        If True, negate the resulting y coordinate.

    Returns
    -------
    newPos : tuple
        The (x, y) position in center-based coordinates.

    Examples
    --------
    >>> newPos = topLeftToCenter((100,100), (1920,1080), False)
    >>> newPos
    (-860.0, 440.0)
    """
    halfWidth = screenXY[0] / 2.0
    halfHeight = screenXY[1] / 2.0
    centeredX = pointXY[0] - halfWidth
    centeredY = halfHeight - pointXY[1]
    if flipY:
        centeredY = -centeredY
    return centeredX, centeredY
from bs4 import BeautifulSoup
def html(icon: str) -> BeautifulSoup:
    """Parse the *icon* markup and return it as an HTML tag (BeautifulSoup)."""
    soup = BeautifulSoup(icon, "html.parser")
    return soup
def media(a, b, c):
    """Print the weighted average of three grades (weights 2, 3 and 5).

    The result is written to stdout as 'MEDIA = x.x' (one decimal place);
    the function returns None, exactly like the original `return print(...)`.
    """
    med = (2 * a + 3 * b + 5 * c) / 10
    # print() already returns None; returning its result was misleading.
    print(f'MEDIA = {med:.1f}')
import string
import random
def filenameGenerator(size=12, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
    """
    Description: generate a random filename with a .csv extension
    Input: size (optional) - number of random characters; chars (optional) - alphabet to draw from
    Return: filename string, e.g. 'aB3xY9kQmP2z.csv'
    """
    return ''.join(random.choice(chars) for _ in range(size))+".csv"
def merge_dict(a: dict, b: dict) -> dict:
    """
    Merge 2 dictionaries, recursively, in place.

    If the parent and child share a key and both values are dictionaries,
    that key is merged recursively. Otherwise, the child value overrides
    the parent value. Note that *a* is mutated and also returned.

    Parameters
    ----------
    a dict:
        Parent dictionary (modified in place)
    b dict:
        Child dictionary

    Returns
    -------
    dict
        Merged dictionary (the same object as *a*)
    """
    for key, value in b.items():
        # The original had two identical override branches; a single
        # condition covers the one genuinely recursive case.
        if key in a and isinstance(a[key], dict) and isinstance(value, dict):
            a[key] = merge_dict(a[key], value)
        else:
            a[key] = value
    return a
def get_file_info_from_url(url: str, spec_dir: str):
    """
    Derive a storage file name for a spec from its URL.

    The GitHub raw prefix and YAML extensions are stripped, slashes become
    dashes, and the result is lowercased.

    :return: (spec_name, full_spec_name, spec_path) tuple.
    """
    trimmed = url.replace('https://raw.githubusercontent.com/', '')
    for ext in ('.yml', '.yaml'):
        trimmed = trimmed.replace(ext, '')
    spec_name = trimmed.replace('/', '-').lower()
    full_spec_name = f'{spec_name}.pickle'
    spec_path = f'{spec_dir}/{full_spec_name}'
    return spec_name, full_spec_name, spec_path
def _indent_for_list(text, prefix=' '):
"""Indent some text to make it work as a list entry.
Indent all lines except the first with the prefix.
"""
lines = text.splitlines()
return '\n'.join([lines[0]] + [
prefix + l
for l in lines[1:]
]) + '\n' | 98da37331f46f50e775bf4676d9d45c8274ffa03 | 666,596 |
def get_swap_array(orient):
    """Gives how to reorder and flip the axes to fit scanner space.

    [phase, readout, slice] -> [x, y, z]; intended for use with np.moveaxis.

    Args:
        orient -- procpar parameter 'orient'
    Returns:
        arr -- sequence of new axes
        flipaxis -- axes to flip (list) or None
        sliceaxis -- axis of slice encoding
    Raises:
        Exception: for orientations that are not implemented.
    """
    # orient -> (axis order, axes to flip, slice axis)
    # NOTE(review): for 'cor90' the original assigned [1, 2, 0] and then
    # immediately overwrote it with [2, 0, 1]; the dead assignment is dropped
    # and the effective value kept.
    table = {
        'trans':   ([0, 1, 2], None,   2),
        'trans90': ([1, 0, 2], [1],    2),
        'sag':     ([1, 2, 0], None,   0),
        'sag90':   ([2, 1, 0], [2],    0),
        'cor':     ([0, 2, 1], [1],    1),
        'cor90':   ([2, 0, 1], [1, 2], 1),
    }
    try:
        return table[orient]
    except KeyError:
        raise Exception('Other orientations not implemented')
import re
def get_libc_version(path):
    """Get the libc version.

    Args:
        path (str): Path to the libc (opened as text).
    Returns:
        str: Libc version, like '2.29' or '2.26', or '' when not found.
    """
    # `with` closes the handle (the bare open().read() leaked it), and the
    # raw-string pattern avoids the invalid-escape DeprecationWarning.
    with open(path) as f:
        content = f.read()
    match = re.search(r"libc[- ]([0-9]+\.[0-9]+)", content)
    return match.group(1) if match else ""
def make_year_column(economy_data):
    """
    Transpose the economy dataframe and record its former columns as 'year'.

    :param economy_data: pandas economy data, with years as column labels
    :return: transposed dataframe with an added 'year' column
    """
    former_columns = list(economy_data.columns)
    transposed = economy_data.transpose()
    transposed['year'] = former_columns
    return transposed
def convert_range_to_number_list(range_list):
    """
    Expand a descriptive range list into a list of numbers.

    E.g. ['12-14', '^13', '17'] is converted to [12, 14, 17]; '^' marks a
    number to exclude and 'a-b' an inclusive range.
    Returns a string with an error message if the input cannot be parsed.
    """
    # borrowed from [email protected]
    included = []
    excluded = []
    try:
        for raw in range_list:
            token = raw.strip(' ')
            if '^' in token:
                excluded.append(int(token[1:]))
            elif '-' in token:
                parts = token.split("-")
                included.extend(range(int(parts[0]), int(parts[1]) + 1))
            else:
                included.append(int(token))
    except ValueError as exc:
        return "Parse Error: Invalid number in input param 'num_list': %s" % exc
    return [n for n in included if n not in excluded]
def guess_type(cube_arr):
    """Predict the class of each row as the index of its highest probability.

    Args:
        cube_arr (np.array): cube array data, classes along axis 1
    """
    predictions = cube_arr.argmax(axis=1)
    return predictions
from io import StringIO
def _unpack(bytes):
"""Unpack C{bytes} into a digest, keyInfo, iv, and a payload.
@return: a 4-tuple of digest, keyInfo, iv, and payload.
"""
buffer = StringIO(bytes)
digest = buffer.read(20)
keyInfo = buffer.read(4)
iv = buffer.read(16)
payload = buffer.read()
return digest, keyInfo, iv, payload | 10858aa79fc86d3c48e348e1be42e2145215b5b7 | 445,581 |
def collapse_complexes(data, conjugate_flag=False):
    """Collapse a flat iterable of (real, imaginary) pairs into complex numbers.

    Given [a, b, c, d, e, f] this returns
    [complex(a, b), complex(c, d), complex(e, f)].
    A new list is returned; the original is unchanged. When conjugate_flag
    is True, the imaginary parts are negated.
    """
    # Pairing trick: zipping one iterator with itself yields consecutive
    # (real, imag) pairs lazily, which is both fast and memory-efficient.
    # See http://stackoverflow.com/questions/4628290/pairs-from-single-list
    pair_iter = iter(data)
    if conjugate_flag:
        return [complex(re_part, -1 * im_part)
                for re_part, im_part in zip(pair_iter, pair_iter)]
    return [complex(re_part, im_part)
            for re_part, im_part in zip(pair_iter, pair_iter)]
def all_but_last(seq):
    """ Returns a new sequence containing all but the last element of the input sequence.
    If the input sequence is empty, a new empty sequence of the same type is returned.
    Example:
    >>> all_but_last("abcde")
    'abcd'
    """
    # Slicing preserves the sequence type and handles the empty case. The
    # original element-by-element `+=` was O(n^2) and raised TypeError for
    # lists/tuples of non-sequence elements (e.g. [1, 2, 3]).
    return seq[:-1]
def parity(byte):
    """
    Return True if the number of bits that are set is even.
    """
    # bin() already returns a str; the extra str() wrapper was redundant.
    return bin(byte).count('1') % 2 == 0
def find_correct_weight(program_weights, program, correction):
    """Return the new weight for a node: its current weight plus *correction*."""
    current = program_weights[program]
    return current + correction
def _validate_ld_matrix(ld_mat):
    """
    Takes an `LDMatrix` object and checks its contents for validity.
    Specifically, we check that:
    - The dimensions of the matrix and its associated attributes are matching.
    - The LD boundaries are correct.
    - The masking is working properly.
    :param ld_mat: An instance of `LDMatrix`
    :return: True if `ld_mat` has the correct structure.
    :raises ValueError: on any structural inconsistency (note: despite the
        original docstring, the function raises rather than returning False).
    """
    # Each per-SNP attribute, when present, must have one entry per element.
    attributes = ['snps', 'a1', 'maf', 'bp_position', 'cm_position', 'ld_score']
    for attr in attributes:
        attribute = getattr(ld_mat, attr)
        if attribute is None:
            continue
        if len(attribute) != ld_mat.n_elements:
            raise ValueError("Invalid LD Matrix: Attribute dimensions are not aligned!")
    # Check LD bounds: a (2, n) array of [start; end] indices per element.
    ld_bounds = ld_mat.get_masked_boundaries()
    if ld_bounds.shape != (2, ld_mat.n_elements):
        raise ValueError("Invalid LD Matrix: LD boundaries have the wrong dimensions!")
    ld_block_lengths = ld_bounds[1, :] - ld_bounds[0, :]
    # Iterate over the stored LD data to check its dimensions:
    # each stored block must be exactly as long as its boundary span.
    i = 0
    for i, d in enumerate(ld_mat):
        if len(d) != ld_block_lengths[i]:
            raise ValueError(f"Invalid LD Matrix: Element {i} does not have matching LD boundaries!")
    # After the loop, i holds the last index seen; it must equal n_elements - 1,
    # i.e. iteration yielded exactly as many blocks as advertised.
    if i != (ld_mat.n_elements - 1):
        raise ValueError(f"Invalid LD Matrix: Conflicting total number of elements!")
    return True
from typing import List
from typing import Tuple
def start_end_locations_from_locations(
    locations: List[float],
) -> Tuple[List[float], List[float]]:
    """
    Calculate the start and end values of each location.

    Each entry starts at its own value and ends at the next entry's value;
    the final entry ends at itself.
    Ex) 5, 10, 15
        start_times == 5, 10, 15
        end_times   == 10, 15, 15

    Returns
    -------
    A tuple of (start_locations, end_locations)
    """
    starts = list(locations)
    ends = list(locations[1:]) + list(locations[-1:])
    return starts, ends
import re
def split_user(userid):
"""Return the user and domain parts from the given user id as a dict.
For example if userid is u'acct:[email protected]' then return
{'username': u'seanh', 'domain': u'hypothes.is'}'
:raises ValueError: if the given userid isn't a valid userid
"""
match = re.match(r'^acct:([^@]+)@(.*)$', userid)
if match:
return {
'username': match.groups()[0],
'domain': match.groups()[1]
}
raise ValueError("{userid} isn't a valid userid".format(userid=userid)) | 9419ca7a4f20b7393752476e9889a0eb62f0fbd7 | 420,226 |
def add(vec_1, vec_2):
    """
    Element-wise addition of two 3-vectors.

    :param vec_1: a subscriptable collection of length 3
    :param vec_2: a subscriptable collection of length 3
    :return vec_3: a list of three floats, the component-wise sum
    """
    return [float(vec_1[i]) + float(vec_2[i]) for i in range(3)]
def is_sentence(sentence):
    """
    Return True when the input contains more than one space-separated word.
    """
    words = sentence.split(' ')
    return len(words) > 1
def parsePoint(storedPt):
    """
    Translate a string of the form "{1, 0}" into a float tuple (1.0, 0.0).
    """
    inner = storedPt.strip("{}")
    return tuple(float(part) for part in inner.split(","))
def get_ffmpeg_concact(file_names):
    """Return the file names formatted for the 'ffmpeg concat' command input."""
    return '\n'.join("file '{}'".format(name) for name in file_names)
def get_reverse_endian(bytes_array):
    """
    Reverse endianness in an arbitrary-length bytes array.

    Byte-order reversal is simply reversing the byte sequence, so a reverse
    slice replaces the original hex-string round trip (same result, O(n),
    no intermediate strings).
    """
    # bytes(...) keeps the return type as bytes even for bytearray input,
    # matching the original's bytes.fromhex() result.
    return bytes(bytes_array[::-1])
def add_three(src: int) -> int:
    """Return the input plus 3.

    Args:
        src (int): original value
    Returns:
        int: the value increased by 3
    """
    result = src + 3
    return result
def define_pagination_variables(limit, offset, homophonesGroupCollection):
    """Compute pagination URLs and counters from *limit* and *offset*.

    Returns (prevURL, nextURL, totalPages, currentPage); a URL is None when
    there is no previous/next page.
    """
    prevOffset = offset - limit
    prevURL = None if prevOffset < 0 else f'/p/?limit={limit}&offset={prevOffset}'

    totalDocuments = homophonesGroupCollection.count_documents({})

    nextOffset = offset + limit
    nextURL = None if nextOffset >= totalDocuments else f'/p/?limit={limit}&offset={nextOffset}'

    totalPages = (totalDocuments // limit) + 1
    currentPage = (offset // limit) + 1
    return (prevURL, nextURL, totalPages, currentPage)
def find_parameter(env, param, role=""):
"""
Find a parameter in an environment map and return it.
If paramter is not found return 0.
Supports role parameters too. E.g. given the following
inside of env, with param=CephHciOsdCount and role="",
this function returns 3. But if role=ComputeHCI, then
it would return 4.
CephHciOsdCount: 3
ComputeHCIParameters:
CephHciOsdCount: 4
"""
role_parameters = role + 'Parameters'
if role_parameters in env and param in env[role_parameters]:
return env[role_parameters][param]
elif param in env:
return env[param]
return 0 | 507862516cb99e3e07354c027dc373d24de879ee | 608,428 |
def parse_input_args(input_str: str):
    """
    Parse a 'key1=value1, key2=value2' string into a dictionary.

    Digit-only values are converted to int. Raises ValueError for an empty
    input string or for a pair that is not of the form key=value.
    """
    if not input_str:
        raise ValueError("Empty input string: {}".format(input_str))
    parsed = {}
    pairs = [piece.strip() for piece in input_str.split(",")]
    if not pairs:
        raise ValueError("Incorrect format: {}".format(input_str))
    for pair in pairs:
        try:
            key, value = pair.split("=")
        except ValueError as value_error:
            raise ValueError("Expected input format "
                             "'key1=value1, key2=value2' "
                             "but received {}".format(input_str)) \
                from value_error
        parsed[key] = int(value) if value.isdigit() else value
    return parsed
import re
def extract_tarball_url(_tail):
    """
    Extract the tarball URL for missing user code, if possible, from the
    stdout tail.

    :param _tail: tail of payload stdout (string).
    :return: url (string), or "(source unknown)" when no URL is present.
    """
    unknown = "(source unknown)"
    if "https://" not in _tail and "http://" not in _tail:
        return unknown
    matches = re.findall(r"(https?\:\/\/.+)", _tail)
    return matches[0] if matches else unknown
import re
def file_sort(my_list):
    """
    Sort a list of file names naturally, in place, and return it.

    Numeric runs compare as integers, so item-10 sorts after item-9.
    """
    def natural_key(name):
        """Split *name* into alternating str/int chunks for comparison."""
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split("([0-9]+)", name)]
    my_list.sort(key=natural_key)
    return my_list
def is_npy(s):
    """
    Filter check for npy files.

    :param str s: file path
    :returns: True if the path contains ".npy"
    :rtype: bool
    """
    # `in` expresses the same substring test as find(...) == -1 directly,
    # without the inverted if/else.
    return ".npy" in s
def _get_db_table_for_model(model):
"""
Return table name in database server for passed model.
"""
return model._meta.db_table | 14607fd11ae308b371af45232467bbe13fe907ac | 646,617 |
def build_conflicting_scores_string(conflicting_scores):
    """Build a printable string from a conflicting-scores list.

    Each entry is a pair-like (match, score) where match has a .name
    attribute; one '\\n"<name>" match score: <score>' fragment is emitted
    per entry.
    """
    # str.join avoids the quadratic cost of repeated += concatenation.
    return "".join(f"\n\"{entry[0].name}\" match score: {entry[1]}"
                   for entry in conflicting_scores)
def Notas(*valores, sit=False):
    """Summarize a set of grades.

    Parameters:
    - valores: the student's grades (variadic, any count).
    - sit: when True, also include the situation in the result
      (False by default).

    Returns a dict with the grade count ('Total'), highest ('Maior'),
    lowest ('Menor'), average ('Média') and, when enabled, the situation
    ('Situação'). Keys are kept in Portuguese for backward compatibility.
    """
    ordenados = sorted(valores)
    info = dict()
    info['Total'] = len(ordenados)
    info['Maior'] = ordenados[-1]
    info['Menor'] = ordenados[0]
    # sum() replaces the manual accumulation loop.
    media = float(sum(ordenados) / len(ordenados))
    info['Média'] = media
    if sit:
        if media < 5:
            info['Situação'] = 'Ruim'
        elif media < 7:
            info['Situação'] = 'Razoável'
        else:
            info['Situação'] = 'Boa'
    return info
def strip_port_from_host(host):
    """
    Return *host* with any ':port' suffix removed.
    """
    name, _, _port = host.partition(':')
    return name
def _block_prepend(string, val):
"""Prepend val to each line of string."""
return "\n".join(
map(lambda l: val+l, string.split("\n"))
) | 915086d46e06e3cdc3daeb51374f3ebf178ace37 | 236,877 |
def find_pport(sys_w, physloc):
    """Find an SR-IOV physical port based on its location code.

    :param sys_w: pypowervm.wrappers.managed_system.System wrapper of the host.
    :param physloc: Physical location code string (per SRIOV*PPort.loc_code) of
        the SR-IOV physical port to find.
    :return: SRIOVEthPPort or SRIOVConvPPort wrapper with the specified
        location code, or None if no such port exists in sys_w.
    """
    all_ports = (pport
                 for sriov in sys_w.asio_config.sriov_adapters
                 for pport in sriov.phys_ports)
    return next((p for p in all_ports if p.loc_code == physloc), None)
def drop_id_prefixes(item):
    """Rename keys ending in 'id' to just 'id', recursively, in nested dicts.

    Lists are mapped element-wise; any other value passes through untouched.
    """
    if isinstance(item, dict):
        renamed = {}
        for key, value in item.items():
            new_key = 'id' if key.endswith('id') else key
            renamed[new_key] = drop_id_prefixes(value)
        return renamed
    if isinstance(item, list):
        return [drop_id_prefixes(element) for element in item]
    return item
def primitives_usages(request):
    """Fixture to return possible cases of primitives use cases.

    Intended as a parametrized pytest fixture: each configured parameter is
    exposed through ``request.param``. NOTE(review): the ``@pytest.fixture``
    decorator is not visible in this chunk — confirm it is applied at the
    definition site.
    """
    return request.param
def block_properties(img_size, blk_size):
    """Calculate the block-grid shape for sliding-window processing.

    :param img_size: Size of the input image (rows, cols, ...)
    :param blk_size: Side length of an individual block
    :return: Number of rows and cols of the resulting matrix
    """
    rows, cols = img_size[0], img_size[1]
    return int(rows / blk_size), int(cols / blk_size)
def almost_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Test approximate equality of two numbers.

    Same semantics as math.isclose in Python v3.5 (and newer):
    https://www.python.org/dev/peps/pep-0485
    """
    diff = abs(a - b)
    threshold = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return diff <= threshold
import base64
import json
import time
import hmac
import hashlib
def extractAndValidateBody(
    body: str,
    key: str = "",
    signature: str = "",
    isBase64: bool = False,
    with_validate: bool = True,
) -> dict:
    """
    Basic parsing of the body, including optional validation of a HMAC, to a dict
    >>> t = int(time.time())
    >>> valid_body = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}'
    >>> valid_b64b = base64.b64encode(valid_body.encode("utf-8")).decode("utf-8")
    >>> test1 = extractAndValidateBody(valid_b64b, isBase64=True, with_validate=False)
    >>> test1.pop("time") != "0"
    True
    >>> test1
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> test2 = extractAndValidateBody(valid_body, with_validate=False)
    >>> test2.pop("time") != "0"
    True
    >>> test2
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> key = "abcdefg"
    >>> h = hmac.new(key.encode("utf-8"), valid_body.encode("utf-8"), hashlib.sha512)
    >>> test5 = extractAndValidateBody(valid_body, key=key, signature=h.hexdigest())
    >>> test5.pop("time") != "0"
    True
    >>> test5
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> try:
    ...     extractAndValidateBody(key="12345", body="{}")
    ... except Exception as e:
    ...     print(e)
    key or signature missing
    >>> try:
    ...     extractAndValidateBody('{"subnet": "123", "sg": "456", "repo": "789", "time": 1015213801}', with_validate=False)
    ... except Exception as e:
    ...     print(e)
    request expired
    """
    if with_validate and (not key or not signature):
        raise Exception("key or signature missing")
    if isBase64:
        body = base64.b64decode(body.encode("utf-8")).decode("utf-8")
    body_qs = json.loads(body)
    if "time" not in body_qs:
        raise Exception("missing required body item")
    requestTime = int(body_qs["time"])
    # replay protection: reject anything 30 seconds old or older
    if (int(time.time()) - requestTime) >= 30:
        raise Exception("request expired")
    if with_validate:
        # key is guaranteed truthy here by the first check; kept as a guard.
        if not key:
            raise Exception("Key not valid")
        h = hmac.new(key.encode("utf-8"), body.encode("utf-8"), hashlib.sha512)
        # constant-time comparison prevents timing side-channel attacks on
        # the signature check (the original used plain ==).
        if not hmac.compare_digest(h.hexdigest(), signature):
            raise Exception("Bad signature")
    return body_qs
import numbers
def _time2sec(time):
"""Convert string (e.g. 1d or 0.5h) to seconds"""
if not isinstance(time, numbers.Number):
time, unit = float(time[:-1]), time[-1]
assert unit in 'dh'
time *= 24 * 3600 if unit == 'd' else 3600
return time | 997b5d5fc2a6d6cc395deeadcffffb9a0bf53762 | 552,291 |
def GetKeyIdFromResourceName(name):
    """Gets the key id (sixth path segment) from a resource name.

    No validation is done.
    """
    segments = name.split('/')
    return segments[5]
def learner_name(learner):
    """Return the value of `learner.name` if it exists, or the learner's type
    name otherwise."""
    fallback = type(learner).__name__
    return getattr(learner, "name", fallback)
import math
def elastic_easeinout(pos):
    """
    Easing function for animations: Elastic Ease In & Out

    ``pos`` is the animation progress, expected in [0, 1]; the return value
    oscillates around the linear path (sine carrier under an exponential
    envelope), settling at the endpoints.
    """
    if pos < 0.5:
        # Ease-in half: sine wave scaled by a growing exponential envelope.
        return 0.5 * math.sin(13 * math.pi * pos) * math.pow(2, 10 * ((2 * pos) - 1))
    # Ease-out half: decaying envelope, offset by +2 so the curve ends at 1.
    return 0.5 * (
        math.sin(-13 * math.pi / 2 * ((2 * pos - 1) + 1)) * pow(2, -10 * (2 * pos - 1))
        + 2
    )
def print_same_line(s: str, fill_num_chars: int, done: bool = False) -> int:
    """A helper to repeatedly print to the same terminal line.

    Args:
        s: The text to be printed.
        fill_num_chars: Pass `0` on the first call of a series, then the
            return value of the previous call, so that leftover characters
            from longer prior output get blanked out.
        done: Pass `True` on the final call for a given output line to move
            the cursor to the next line.

    Returns:
        The number of characters written, to be passed as `fill_num_chars`
        on the next call.
    """
    padded = s + " " * (fill_num_chars - len(s))
    line_end = "\n" if done else ""
    print("\r" + padded, end=line_end)
    return len(padded)
def rgb2hsv(rgb):
    """Converts RGB to HSV.

    Note that H is None when S is 0 (grey colors), since hue is undefined
    there.
    """
    r, g, b = rgb
    lo = min(r, g, b)
    hi = max(r, g, b)
    v = hi
    delta = hi - lo
    if delta == 0:
        # grey: zero saturation, hue undefined
        return (None, 0, v)
    s = delta / hi
    if r == hi:
        h = (g - b) / delta        # between yellow & magenta
    elif g == hi:
        h = 2 + (b - r) / delta    # between cyan & yellow
    else:
        h = 4 + (r - g) / delta    # between magenta & cyan
    h *= 60  # degrees
    if h < 0:
        h += 360
    return (h, s, v)
def check_puzzle_list(lst, n):
    """
    Validate a one-dimensional n-puzzle list.

    lst : the list to validate; '' entries stand for the blank tile (0).
    n : puzzle type (n-puzzle), so a valid list holds n + 1 entries.
    Returns True if the list is a permutation of 0..n, False otherwise.
    """
    # Length must be exactly n + 1.
    if len(lst) != n + 1:
        return False
    # Map blanks ('') to 0, leaving a copy so the caller's list is untouched.
    remaining = [0 if cell == '' else cell for cell in lst]
    # Remove each expected value 0..n once; a miss means an invalid list.
    for expected in range(n + 1):
        try:
            remaining.remove(expected)
        except ValueError:
            return False
    return len(remaining) == 0
def template_escape(text: str) -> str:
    """Return the text escaped and ready for usage in a Jinja template.

    Escapes {{, {% and {#.

    Example
    >>> template_escape("{% value }")
    "{{'{%'}} value }"

    Args:
        text (str): The text to escape
    Returns:
        str: Escaped text
    """
    escaped = text
    # Replacement order matches the original chained calls: {{ first.
    for token in ("{{", "{%", "{#"):
        escaped = escaped.replace(token, "{{'" + token + "'}}")
    return escaped
def _calc_ta(eff, n_gre, tr_gre, tr_seq, ti1, ti2, a1, a2):
    """Calculate TA for MP2RAGE sequence.

    Only `ti1`, `n_gre` and `tr_gre` enter the formula: TA is the time from
    sequence start to the first GRE readout block, centered on TI1.
    The remaining parameters are unused here — presumably kept so sibling
    MP2RAGE timing helpers share one signature; confirm at the call sites.
    """
    return (2.0 * ti1 - n_gre * tr_gre) / 2.0
def has_trailing_slash(p):
    """Checks if a path ends with a slash (False for empty or falsy input)."""
    if not p:
        return False
    return str(p).endswith('/')
def check_int(integer):
    """
    Check if a string represents a (possibly signed) integer.

    :param integer: Number as str; non-str inputs yield False
    :return: Boolean
    """
    if not isinstance(integer, str):
        return False
    # Slicing with [:1] handles the empty string safely (the original
    # indexed integer[0] and raised IndexError on '').
    unsigned = integer[1:] if integer[:1] in ('-', '+') else integer
    return unsigned.isdigit()
import math
def safe_sqrt(val):
    """
    Safely calculate the square root of a value.

    The value -1 is the 'unknown' sentinel and is passed through unchanged.
    """
    if val == -1:
        return -1
    return math.sqrt(val)
def split_rule_matrioska(rule):
    """Split a bag rule into the container bag and its contents clause."""
    separator = ' bags contain '
    return rule.split(separator)
def get_category(risk_attr):
    """
    Assign a category to a risk attribute, i.e. to a human at a given hour
    of a given day.

    Checks run in priority order: the person's own test/infection/exposure
    status first, then first-order contact status, then "K" as the default.

    Args:
        risk_attr (dict): dictionary representing a human's risk at a given time
    Returns:
        str: category for this person
    """
    if risk_attr["test"]:
        return "D"
    if risk_attr["infectious"] and risk_attr["symptoms"] == 0:
        return "B"
    if risk_attr["infectious"] and risk_attr["symptoms"] > 0:
        return "C"
    if risk_attr["exposed"]:
        return "A"
    # First-order contact checks, highest priority first.
    order_1_rules = (("order_1_is_tested", "J"),
                     ("order_1_is_symptomatic", "I"),
                     ("order_1_is_presymptomatic", "H"),
                     ("order_1_is_exposed", "E"))
    for flag, category in order_1_rules:
        if risk_attr[flag]:
            return category
    return "K"
def limit_to_box(x, box):
    """Select only the rows of *x* that fall inside the given box.

    *box* is a sequence of (low, high) bounds, one per column; filtering is
    applied dimension by dimension with inclusive bounds.
    """
    kept = x.copy()
    for dim, bounds in enumerate(box):
        kept = kept[kept[:, dim] >= bounds[0], :]
        kept = kept[kept[:, dim] <= bounds[1], :]
    return kept
def crop_center(img, cropx, cropy, cropz):
    """
    Take a center crop of a 4D image (x, y, z, channels).

    Start indices are clamped to >= 0 and end indices to the image extent,
    so a crop larger than the image returns the full axis. The channel axis
    is always kept whole.
    """
    x, y, z, _channels = img.shape
    # Clamp the starting index of each axis to >= 0.
    startx = max(x // 2 - cropx // 2, 0)
    starty = max(y // 2 - cropy // 2, 0)
    startz = max(z // 2 - cropz // 2, 0)
    # Clamp the ending index of each axis to <= its size.
    endx = min(startx + cropx, x)
    endy = min(starty + cropy, y)
    endz = min(startz + cropz, z)
    return img[startx:endx, starty:endy, startz:endz, :]
def parse_project_id(project_id):
    """Simple check for project id.

    :param project_id: a numeric project id, int or string.
    :return: a unified project id.
    :rtype: :class:`str`
    :raises ValueError: if the id is not convertible to an integer.
    """
    try:
        int(project_id)
    except ValueError:
        raise ValueError("Project id should be convertible to integer")
    else:
        return str(project_id)
def get_institution(soup):
    """Return institution name (or None, if there is no institution name).

    :param soup: BeautifulSoup document of the scraped page.

    NOTE(review): the chained find() calls assume the institution sits in a
    div > span > span with this exact inline style attribute; if any find()
    returns None this raises AttributeError instead of returning None —
    confirm against the actual page layout. The backslash continuation is
    inside the style string, so its exact whitespace is significant.
    """
    institution = soup.find('div', style="line-height: 30px; \
text-align: center; margin-bottom: 1ex").find('span').find('span').text
    if not institution:
        institution = None
    return institution
from typing import Tuple
def rgb_to_int(x: Tuple[int, int, int]) -> int:
    """Pack an (R, G, B) tuple into a single 24-bit integer."""
    r, g, b = x
    return int((r << 16) | (g << 8) | b)
import torch
def complex_conj(x: torch.Tensor) -> torch.Tensor:
    """
    Complex conjugate.

    Assumes the input stores complex values as (real, imag) pairs in its
    last dimension; the imaginary part is negated.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.
    Returns:
        A PyTorch tensor with the last dimension of size 2.
    Raises:
        ValueError: if the last dimension is not of size 2.
    """
    if x.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
import math
def exp_ce(x: float, a: float = 1) -> float:
    """Certainty equivalent for exponential utility (inverse of exp_utility)."""
    complement = 1 - x
    return -math.log(complement) / a
from typing import Tuple
def hex_to_rgb(hex_value: str) -> Tuple[int, int, int]:
    """
    Convert a valid hexadecimal color string '#XXXXXX' to an RGB tuple.

    If the string representation does not match that form, a ValueError is
    raised.
    :param hex_value: String representation of hexadecimal color.
    :return: Tuple of RGB color values: (R,G,B)
    """
    if len(hex_value) != 7 or not hex_value.startswith("#"):
        raise ValueError(
            "the color has to be specified by '#XXXXXX'. Invalid value %s" % hex_value
        )
    hex_value = hex_value.lstrip("#")
    try:
        int(hex_value, 16)
    except ValueError:
        raise ValueError(
            "the color value has to be a valid hexadecimal number. Invalid value %s"
            % hex_value
        )
    return tuple(int(hex_value[i:i + 2], 16) for i in (0, 2, 4))
def make_array_header(byte_string: bytes, name: str) -> str:
    """Return the header declaration for a C array with the given name and size."""
    # len() works on bytes directly; the mutable bytearray copy the original
    # made was wasted work.
    return 'extern const uint8_t ' + name + '[{:d}];\n'.format(len(byte_string))
def get_angle_unit_data(sum_df, **kwargs):
    """
    Function: get angle unit information from measured target positions.
    Input:
    - sum_df: DataFrame. processed DataFrame that contains both bundle heel
      and target info and growth cone length and angle info.
    - kwargs: additional parameters
        - 'criteria': DataFrame with Boolean values, filtering which bundles
          to include in the calculation.
    Output:
    - phi_unit: radian value of "1" in standardized coordinates, i.e. half
      of the mean 'aT3cT7' angle over the selected rows.
    """
    if 'criteria' in kwargs:
        sum_df = sum_df.loc[kwargs['criteria'], :]
    phi_unit = sum_df['aT3cT7'].mean() / 2
    return phi_unit
def is_requirements_empty(requirements_file_path):
    """
    Tests to see if a pip requirements.txt file is effectively empty.

    Blank lines and '#' comment lines do not count as content.

    :param requirements_file_path: the path to the requirements.txt file
    :return: True if it is empty, False otherwise
    """
    # `with` guarantees the handle is closed (the original leaked it), and
    # we can return as soon as the first real requirement line is seen.
    with open(requirements_file_path) as fp:
        for line in fp:
            line = line.strip()
            if line and not line.startswith('#'):
                return False
    return True
def version_to_float(version):
    """
    Convert a version string to a float, dropping a single trailing 'b'
    (beta marker) if present.
    """
    numeric = version[:-1] if version.endswith('b') else version
    return float(numeric)
def as_iterable(iterable_or_scalar):
    """Coerce an object into an iterable.

    Rules (checked in order):
    - None            -> empty tuple
    - str / bytes     -> 1-tuple wrapping the string (strings are treated
                         as scalars here, not as iterables)
    - other iterables -> returned unchanged
    - scalars         -> 1-tuple wrapping the value

    Examples
    --------
    >>> as_iterable(1)
    (1,)
    >>> as_iterable([1, 2, 3])
    [1, 2, 3]
    >>> as_iterable("my string")
    ('my string',)
    """
    obj = iterable_or_scalar
    if obj is None:
        return ()
    if isinstance(obj, (str, bytes)):
        return (obj,)
    if hasattr(obj, "__iter__"):
        return obj
    return (obj,)
def rename_key(key):
    """Translate an old state_dict key to the new naming scheme."""
    # ResidualBlockWithStride renamed its 'downsample' branch to 'skip'.
    for suffix in (".downsample.bias", ".downsample.weight"):
        if suffix in key:
            return key.replace("downsample", "skip")
    return key
def get_picard_max_records_string(max_records: str) -> str:
    """Render the Picard 'MAX_RECORDS_IN_RAM' CLI fragment.

    Returns the empty string when `max_records` is None or empty;
    otherwise a leading-space " MAX_RECORDS_IN_RAM=<int>" fragment.
    """
    if max_records is None or max_records == "":
        return ""
    return " MAX_RECORDS_IN_RAM=%d" % int(max_records)
import calendar
import time
def utc_to_epoch(utc):
    """
    Convert a UTC timestamp string ('YYYY-MM-DD HH:MM:SS+00') to
    milliseconds since the Unix epoch.

    Bug fix: time.mktime interprets a struct_time as *local* time,
    which skewed results by the host's UTC offset; calendar.timegm
    treats the struct_time as UTC, as the function name promises.
    """
    timeformat = '%Y-%m-%d %H:%M:%S+00'
    return int(calendar.timegm(time.strptime(utc, timeformat)) * 1000)
def _non_blank_line_count(string):
"""
Parameters
----------
string : str or unicode
String (potentially multi-line) to search in.
Returns
-------
int
Number of non-blank lines in string.
"""
non_blank_counter = 0
for line in string.splitlines():
if line.strip():
non_blank_counter += 1
return non_blank_counter | dfa6f43af95c898b1f4763573e8bf32ddf659520 | 708,450 |
def get_count_coprime_number_count(prime_1: int, prime_2: int) -> int:
    """
    Count the numbers coprime to prime_1 * prime_2 (Euler's totient
    of a product of two primes).

    Args:
        prime_1 (int): prime number
        prime_2 (int): prime number

    Returns:
        int: number of coprimes in the given range
    """
    totient_1 = prime_1 - 1
    totient_2 = prime_2 - 1
    return totient_1 * totient_2
def get_id(vmfObject, idPropName='id'):
    """Return the integer ID stored under `idPropName` on the VMF object."""
    raw_id = vmfObject[idPropName]
    return int(raw_id)
def get_index(x, item):
    """Return the index of the first occurrence of `item` in `x`, or -1."""
    position = 0
    for candidate in x:
        if candidate == item:
            return position
        position += 1
    return -1
def getFolder(filename: str) -> str:
    """
    Return the folder portion (with trailing slash) of the given path.

    Backslashes are normalized to forward slashes first. When the path
    contains no separator, an empty string is returned.

    Args:
        filename: the path of the file
    Returns:
        the folder of the same given file, or '' if there is none
    """
    filename = filename.replace("\\", "/")
    try:
        # Index of '/' in the reversed string is the distance of the last
        # '/' from the end; slicing off that tail keeps the folder part.
        filename = filename[0:-filename[::-1].index("/")]
    except ValueError:
        # No separator at all -> no folder component.  (Was a bare
        # 'except', which also swallowed unrelated errors.)
        filename = ""
    return filename
def modulus(angka1: float, angka2: float) -> float:
    """
    Return the remainder of angka1 divided by angka2.

    >>> modulus(3.0, 2.0)
    1.0
    """
    remainder = angka1 % angka2
    return remainder
def merge_extras(extras1, extras2):
    """Merge two iterables of extras into one sorted tuple (case-sensitive).

    If either argument is falsy, the other is returned unchanged
    (keeping whatever container type it already has).
    """
    if not extras1:
        return extras2
    if not extras2:
        return extras1
    combined = set(extras1)
    combined.update(extras2)
    return tuple(sorted(combined))
import random
import copy
def partition(n,m,minQuota,maxQuota):
    """
    Randomly partition the integer 'n' into 'm' blocks.

    Parameters:
        n (int) - the number we want to partition.
        m (int) - the number of blocks we want.
        minQuota (int) - the minimum size of a block.
        maxQuota (int) - the maximum size of a block.
    Returns:
        result (list) - list of 'm' numbers that sum to 'n', where every
        number lies in the inclusive range [minQuota, maxQuota].

    NOTE(review): assumes m*minQuota <= n <= m*maxQuota; outside that
    range random.choice() below is eventually called on an empty list --
    confirm callers guarantee this precondition.
    """
    quotas = []
    #Draw m random quotas, each in [minQuota, maxQuota]
    while len(quotas) < m:
        x = random.randint(minQuota,maxQuota)
        quotas.append(x)
    #Shuffle the quotas
    random.shuffle(quotas)
    #Fix the quotas, such that the sum is equal to n.
    if sum(quotas) > n:
        #If the sum of quotas is greater than n, we need to reduce some values.
        #Split quotas into those already at minQuota (lt) and those above it (gt)
        lt = copy.deepcopy([x for x in quotas if x==minQuota])
        gt = copy.deepcopy([x for x in quotas if x>minQuota])
        temp = list()
        req = sum(quotas) - n
        #Decrement randomly-chosen above-minimum values until the total is n.
        while req > 0:
            x = random.choice(gt)
            gt.remove(x)
            x-=1
            req -= 1
            #Once a value reaches minQuota, park it in temp so it can
            #never be decremented below the minimum.
            if x == minQuota:
                temp.append(x)
            else:
                gt.append(x)
        result = gt + temp + lt
    elif sum(quotas) < n:
        #If the sum of quotas is lesser than n, we need to increase some values.
        req = n - sum(quotas)
        #Split quotas into those below maxQuota (lt2) and those at it (gt2)
        lt2 = copy.deepcopy([x for x in quotas if x<maxQuota])
        gt2 = copy.deepcopy([x for x in quotas if x==maxQuota])
        temp = list()
        #Increment randomly-chosen below-maximum values until the total is n.
        while req > 0:
            x = random.choice(lt2)
            lt2.remove(x)
            x+=1
            req-=1
            #Once a value reaches maxQuota, park it in temp so it can
            #never be incremented above the maximum.
            if x ==maxQuota:
                temp.append(x)
            else:
                lt2.append(x)
        result = gt2 + temp + lt2
    else:
        #Sum already equals n: the quotas are a valid partition as-is.
        result = quotas
    return result
def get_plot_title(l_table, adm_key, adm_value):
    """Build the plot title for an area from its admin code.

    Looks up the area's name in the lookup table; for admin2-level areas
    the admin1 name is prefixed ("<adm1> : <adm2>").

    Parameters
    ----------
    l_table : pandas.DataFrame
        Lookup table relating geographic areas.
    adm_key : str
        Admin level key (e.g. "adm1", "adm2").
    adm_value : int
        Admin code value for the area.

    Returns
    -------
    plot_title : str
        Formatted title string.
    """
    area_rows = l_table.loc[l_table[adm_key] == adm_value]
    plot_title = area_rows[adm_key + "_name"].values[0]
    if adm_key == "adm2":
        # Admin2 plots also carry the parent admin1 region name.
        plot_title = area_rows["adm1_name"].values[0] + " : " + plot_title
    return plot_title
def add_match_to_profile(profile, match, ismap=True, nucl=None):
    """Merge current read-gene matches into the master profile (in place).

    Parameters
    ----------
    profile : dict
        Master gene profile, mutated in place.
    match : dict
        Read-gene matches: either read-index -> genes (map form) or
        gene -> count (count form).
    ismap : bool, optional
        Whether `match` is a read-to-gene(s) map or simple counts.
    nucl : str, optional
        When given, gene Ids are prefixed with this nucleotide Id.
    """
    if nucl:
        def qualify(gene):
            return '{}_{}'.format(nucl, gene)
    else:
        def qualify(gene):
            return gene
    if ismap:
        # map form: append the read index under each matched gene
        for ridx, genes in match.items():
            for gene in genes:
                profile.setdefault(qualify(gene), []).append(ridx)
    else:
        # count form: accumulate per-gene counts
        for gene, count in match.items():
            key = qualify(gene)
            profile[key] = profile.get(key, 0) + count
def get_highest9(pair_list):
    """Return the pairs holding the 9 highest scores.

    When the 9th-highest score is tied, every pair matching that score
    is included, so more than 9 pairs may be returned.

    Assumes `pair_list` is sorted ascending by score and that the score
    is the first element of each pair.
    """
    threshold = pair_list[-9][0]
    top = list(pair_list[-9:])
    # Walk backwards past index -9, pulling in every pair tied with the
    # cutoff score.
    pos = -10
    while pair_list[pos][0] == threshold:
        top.insert(0, pair_list[pos])
        pos -= 1
    return top
def _get_limb_section(asm_str):
"""decode the limb and the section (h or l) from limb (e.g "4l")"""
if len(asm_str.split()) > 1:
raise SyntaxError('Unexpected separator in limb reference')
if asm_str.lower().endswith('l'):
s = 0
elif asm_str.lower().endswith('h'):
s = 1
else:
raise SyntaxError('Expecting \'l\' or \'h\' at the end of limb section reference')
limb = asm_str[:-1]
if not limb.isdigit():
raise SyntaxError('reg reference not a number')
return int(limb), s | 332a2154ac851c5a92697cde0654dca164d17c79 | 559,444 |
import struct
def _unpack_single(t, data):
"""
Unpacks and returns the first piece of data from a struct
"""
try:
return struct.unpack(t, data)[0]
except:
print(data)
raise | 325051e53ac20768f3a16b116f4cc88d4dbb6e23 | 426,640 |
from typing import Union
def concat_keypath(*args: Union[str, int]) -> str:
    """Join partial keypaths and keys into one dotted keypath.

    Args:
        *args (Union[str, int]): The partial keypaths and keys to join.

    Returns:
        str: the concatenated keypath.
    """
    joined = ''
    for piece in (str(a) for a in args):
        # Only insert a separator once the accumulator is non-empty.
        joined = piece if not joined else joined + '.' + piece
    return joined
def get_attributes(parent, selector, attribute):
    """Collect `attribute` values from children of `parent` that match the CSS `selector`."""
    matched = parent.cssselect(selector)
    return [element.get(attribute) for element in matched]
def default_version_splitter(instring):
    """Return the last whitespace-separated token of version output."""
    tokens = instring.split()
    return tokens[-1]
def is_oppo_pass(event_list, team):
    """Return whether the first event is an opponent's failed pass.

    Checks only the first event: type_id 1 (pass), a team other than
    `team`, and outcome 0 (unsuccessful -- likely a bad pass).
    """
    for event in event_list[:1]:
        is_pass = event.type_id == 1
        by_opponent = event.team != team
        failed = event.outcome == 0
        if is_pass and by_opponent and failed:
            return True
    return False
def fixed_point_integer_part(fixed_point_val: int, precision: int) -> int:
    """
    Extract the integer part from the given fixed point value.

    Args:
        fixed_point_val: raw fixed-point integer.
        precision: number of fractional bits; a negative precision means
            the value is scaled the other way, so the integer part is
            the value shifted left by ``-precision``.

    Bug fix: the negative-precision branch shifted by a negative count
    (``<< precision``), which raises ValueError in Python; it now
    shifts by ``-precision``.
    """
    if precision >= 0:
        return fixed_point_val >> precision
    return fixed_point_val << -precision
import pickle
def load_pickle(filename, channel=-1, verbose=0):
    """
    Load Joerg's pickle files.

    Parameters
    ----------
    filename: string
        The full path and name of the file to load.
    channel: int
        The data channel. If negative all channels are selected.
    verbose: int
        if > 0 show detailed error/warning messages

    Returns
    -------
    data: 1-D or 2-D array
        If channel is negative, a 2-D array with data of all channels is
        returned, where first dimension is time and second dimension is
        channel number. Otherwise a 1-D array with the data of that
        channel is returned.
    samplerate: float
        The sampling rate of the data in Hz.
    unit: string
        The unit of the data.
    """
    with open(filename, 'rb') as f:
        data = pickle.load(f)
    if verbose > 0:
        print('loaded %s' % filename)
    # time_trace is in milliseconds -> 1000/dt gives Hz.
    time = data['time_trace']
    samplerate = 1000.0 / (time[1] - time[0])
    raw = data['raw_data']
    if channel >= 0:
        # Bug fix: the channel check and slicing used to operate on the
        # top-level dict instead of the 'raw_data' array.
        if channel >= raw.shape[1]:
            raise IndexError('invalid channel number %d requested' % channel)
        return raw[:, channel], samplerate, 'mV'
    return raw, samplerate, 'mV'
def select_one(src, rel, dst):
    """
    Build an SQL query selecting one ID from a relation table for a
    given source and destination node.

    :param src: The source node.
    :param rel: The relation (table name).
    :param dst: The destination node.
    :return: (query string, parameter tuple)
    """
    query = 'SELECT src FROM %s WHERE src = ? AND dst = ? LIMIT 1' % rel
    return query, (src, dst)
import re
def _add_missing_tags(html_doc):
"""Fixes a issue in the orginal HTML, where the tag <Option> is not closed..
Changes from:
<Select Name=Linha class='ordenacaoSelect'>
<Option Value='510-87'>510 - AUXILIADORA
<Option Value='620-5'>520 - TRIANGULO/24 DE OUTUBRO
To:
<Select Name=Linha class='ordenacaoSelect'>
<Option Value='510-87'>510 - AUXILIADORA</Option>
<Option Value='620-5'>520 - TRIANGULO/24 DE OUTUBRO</Option>
"""
opened_option_tag = r'<Option'
closed_option_re = r'</Option>'
if re.search(closed_option_re, html_doc, re.I) is None:
html_doc = html_doc.replace(opened_option_tag, closed_option_re + opened_option_tag)
return html_doc | 54efd589e9ef1054bd9cfa9e1632b4383a6c86e3 | 220,151 |
def _build_arg_list(tokens):
"""Given a list of Tokens for a line, return an argument list. A
tag with no arguments which includes parentheses should be returned
as an empty tuple. A tag with no arguments which does not have
parentheses will be returned as None."""
try:
# Check for an opening parenthesis;
if tokens[1].type != 'LPAREN':
return None
# If we got this far, we're assuming there are args
args = []
for arg in tokens[2:]:
if arg.type == 'RPAREN':
break
if arg.type == 'SYMBOL':
args.append(arg.value)
except KeyError:
return ()
return tuple(args) | 24ae3db836487f93ba5e69d0fd9336371b527567 | 480,348 |
import pathlib
def create_file(root_path, file_name):
    """Touch an empty file named file_name under root_path.

    ----------
    Args:
        root_path(str): project root path.
        file_name(str): file name created under project root.
    Returns:
        result(boolean): True when the file was created, False when it
        already existed.
    """
    file_path_str = root_path + "/" + file_name
    file_path = pathlib.Path(file_path_str)
    if file_path.exists():
        # Typo fixed: message previously read "... if already exists."
        print("{} already exists.".format(file_path_str))
        return False
    file_path.touch()
    # Consistently return the result flag (the original mixed a literal
    # True with the 'result' variable).
    return True
def f_to_c(fahrenheit):
    """Convert a Fahrenheit temperature to Celsius."""
    offset = fahrenheit - 32.0
    return offset / 1.8
def count_y_clusters(y):
    """
    Count BGC regions in a list of protein-domain BGC states by counting
    each maximal run of consecutive 1s as one region.

    :param y: List of BGC states (0 = non-BGC, 1 = BGC), one per domain.
    :return: Number of BGC regions in the list.
    """
    clusters = 0
    previous = 0
    for state in y:
        # A region starts wherever a 1 follows a 0 (or the list start).
        if state == 1 and previous == 0:
            clusters += 1
        previous = state
    return clusters
def _chname(x):
"""
Returns changed name. Removing the last *
"""
return x[:-1] if x[-1] == '*' else x | ae9c08eb259a5be868d2b707476067e623a8e533 | 188,848 |
def _timeitlike_time_format(time_seconds, precision=3):
"""Shamelessly adapted formatting from timeit.py
Parameters
----------
time_seconds : float
The time in seconds
precision : int
The precision of the output. All digits.
Returns
-------
str
A timeit-like format (with usec, msec, sec).
"""
usec = time_seconds * 1e6
if usec < 1000:
return "%.*g usec" % (precision, usec)
else:
msec = usec / 1000
if msec < 1000:
return "%.*g msec" % (precision, msec)
else:
sec = msec / 1000
return "%.*g sec" % (precision, sec) | f81c00f1ff3c5c1f223785c24bcd181cee939a3a | 430,370 |
Subsets and Splits