content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def nbsp(text):
    """Return *text* with every ASCII space replaced by U+00A0 (no-break space)."""
    return '\N{NO-BREAK SPACE}'.join(str(text).split(' '))
import time
def _init_step_result(step_object):
    """Build the initial step_result dict for *step_object*.

    Copies the step's date/type, records the wall-clock start time, and
    leaves status/end_time/error_info at their "not yet run" defaults.
    """
    result = dict(
        status='unknown',
        date=step_object.step_date,
        start_time=time.time(),
        end_time=None,
        type=step_object.step_type,
        error_info={},
        repr=repr(step_object),
    )
    return result
import re
def ElementToComplexType(xsd):
    """Replace the first <element> wrapper with the complexType it contains.

    The complexType inherits the name attribute of the removed element tag;
    any XML declaration is stripped as well.

    Args:
        xsd: str XSD string contents.
    Returns:
        str: Modified XSD.
    """
    # (pattern, replacement) pairs applied in order; patterns kept verbatim.
    rewrites = [
        (r'<(\w+:)?element name="(\w+)">\s*<(\w+:)?complexType>',
         '<\\3complexType name="\\2">'),
        (r'(\s+)?</(\w+:)?element>', ''),
        (r'<\?xml.*?>', ''),
    ]
    for pattern, replacement in rewrites:
        xsd = re.sub(pattern, replacement, xsd)
    return xsd
def getFilename(url):
    """Return the filename component of *url*, or None if the last path
    segment has no extension (no '.' present)."""
    last_segment = url.rsplit('/', 1)[-1]
    return last_segment if '.' in last_segment else None
def complex_matrix(mat):
    """Convert a real matrix with interleaved real/imag columns into a complex matrix."""
    real_part = mat[:, 0::2]
    imag_part = mat[:, 1::2]
    return real_part + 1j * imag_part
def rgb(r, g, b):
    """Convert decimal RGB components to a 6-digit uppercase hex string.

    Each component is clamped to the 0..255 range before formatting.
    :param r: integer value.
    :param g: integer value.
    :param b: integer value.
    :return: hexadecimal version of RGB decimal numbers.
    """
    def clamp(v):
        return max(0, min(v, 255))
    return "%02X%02X%02X" % (clamp(r), clamp(g), clamp(b))
def gchp_metname(prior_to_13):
    """Return the GCHP StateMet collection name for the given version era."""
    # Versions before 13.0 used the time-averaged collection name.
    return "StateMet_avg" if prior_to_13 else "StateMet"
def split_game_path(path):
    """Split a game path on '/' into its non-empty components.

    Empty parts (from leading/trailing or doubled slashes) are dropped.
    """
    return list(filter(None, path.split('/')))
def CleanGrant(grant):
    """Return *grant* with its numeric fields rounded to canonical precision.

    Ensures two grants from different sources compare equal regardless of
    the logging/storage precision used: lat/lon to 6 decimals, height to 2,
    EIRP to 3.
    """
    rounded = {
        'latitude': round(grant.latitude, 6),
        'longitude': round(grant.longitude, 6),
        'height_agl': round(grant.height_agl, 2),
        'max_eirp': round(grant.max_eirp, 3),
    }
    return grant._replace(**rounded)
def checkerboard_basis(c: complex) -> str:
    """Classify a grid coordinate as 'X' or 'Z' by checkerboard parity."""
    parity = int(c.real + c.imag) & 1
    return 'Z' if parity else 'X'
def get_ip_from_raw_address(raw_address: str) -> str:
    """
    Return the IP portion of a CIDR-style raw address.
    >>> get_ip_from_raw_address('91.124.230.205/30')
    '91.124.230.205'
    >>> get_ip_from_raw_address('192.168.1.15/24')
    '192.168.1.15'
    """
    ip, _sep, _mask = raw_address.partition('/')
    return ip
from typing import Any
import re
import importlib
def load_code(c: str) -> Any:
    """
    Deserializes an object from a Python code string.

    SECURITY: this evaluates arbitrary Python via ``eval`` — only ever call
    it on trusted input.

    Parameters
    ----------
    c
        A string representing the object as Python code.
    Returns
    -------
    Any
        The deserialized object.
    See Also
    --------
    dump_code
        Inverse function.
    """
    def _load_code(code: str, modules=None):
        # Retry loop: each failed eval teaches us one missing module name,
        # which is imported and added to the eval namespace before retrying.
        if modules is None:
            modules = {}
        try:
            return eval(code, modules)
        except NameError as e:
            # e.g. "name 'np' is not defined" -> import top-level module 'np'.
            m = re.match(r"name '(?P<module>.+)' is not defined", str(e))
            if m:
                name = m["module"]
                return _load_code(
                    code,
                    {**(modules or {}), name: importlib.import_module(name)},
                )
            else:
                raise e
        except AttributeError as e:
            # e.g. "module 'a' has no attribute 'b'" -> import submodule 'a.b'.
            m = re.match(
                r"module '(?P<module>.+)' has no attribute '(?P<package>.+)'",
                str(e),
            )
            if m:
                name = m["module"] + "." + m["package"]
                return _load_code(
                    code,
                    {**(modules or {}), name: importlib.import_module(name)},
                )
            else:
                raise e
        except Exception as e:
            # Anything else is a genuine failure; re-raise unchanged.
            raise e
    return _load_code(c) | 8e85bc08d218b0847d0a9fe9d809dc0b7e630114 | 349,913 |
def timeoutDeferred(reactor, deferred, seconds):
    """
    Cancel a L{Deferred} if it does not have a result available within the
    given amount of time.
    @see: L{Deferred.cancel}.
    The timeout only waits for callbacks that were added before
    L{timeoutDeferred} was called. If the L{Deferred} is fired then the
    timeout will be removed, even if callbacks added after
    L{timeoutDeferred} are still waiting for a result to become available.
    @type reactor: L{IReactorTime}
    @param reactor: A provider of L{twisted.internet.interfaces.IReactorTime}.
    @type deferred: L{Deferred}
    @param deferred: The L{Deferred} to time out.
    @type seconds: C{float}
    @param seconds: The number of seconds before the timeout will happen.
    @rtype: L{twisted.internet.interfaces.IDelayedCall}
    @return: The scheduled timeout call.
    """
    # Schedule timeout, making sure we know when it happened:
    def timedOutCall():
        deferred.cancel()
    delayedTimeOutCall = reactor.callLater(seconds, timedOutCall)
    # If Deferred has result, cancel the timeout:
    # (addBoth runs on success AND failure, so the delayed call is always
    # cleaned up; the active() check avoids cancelling an already-fired call.)
    def cancelTimeout(result):
        if delayedTimeOutCall.active():
            delayedTimeOutCall.cancel()
        return result
    deferred.addBoth(cancelTimeout)
    return delayedTimeOutCall | ddf45ebbedee68d9a93a6529fabd6100b55d4c33 | 129,248 |
from functools import reduce
def _calculate_num_of_value(dimensionAttr):
    """Return the flattened element count for a dimension attribute.

    Multi-dimensional channels (count > 1) yield the product of the sizes in
    ``dimensionAttr.value``; otherwise ``value`` itself is the size.

    :param dimensionAttr: The dimension attribute
    :return: An integer which specifies the size of one dimension array
    """
    if dimensionAttr.count <= 1:
        return dimensionAttr.value
    return reduce(lambda acc, size: acc * size, dimensionAttr.value)
def to_pilot_alpha(word):
    """Return the list of pilot-alphabet code words spelling *word*.

    >>> to_pilot_alpha('Smrz')
    ['Sierra', 'Mike', 'Romeo', 'Zulu']
    """
    pilot_alpha = ['Alfa', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot',
        'Golf', 'Hotel', 'India', 'Juliett', 'Kilo', 'Lima', 'Mike',
        'November', 'Oscar', 'Papa', 'Quebec', 'Romeo', 'Sierra', 'Tango',
        'Uniform', 'Victor', 'Whiskey', 'Xray', 'Yankee', 'Zulu']
    # Map each (uppercased) letter to its code word by alphabet offset.
    return [pilot_alpha[ord(ch) - ord('A')] for ch in word.upper()]
def convert_lowercase(in_str):
    """Coerce *in_str* to str and return it lowercased.

    :param in_str: input text (any type; converted with str())
    :return: string
    """
    text = str(in_str)
    return text.lower()
import random
def rand_scale(s):
    """Return a random scale in [1, s] or its reciprocal, with equal odds."""
    scale = random.uniform(1, s)
    coin = random.randint(1, 10000) % 2
    return scale if coin else 1. / scale
def __is_const_str(data: str) -> bool:
    """Return True if the predicate *data* is a constant string.

    Supports constant strings starting with a lowercase letter or with a
    number (e.g. "16_bit_one's_complement"). Purely numeric strings and
    strings containing '@' are rejected. An empty string returns False
    (the original implementation raised IndexError on '').
    """
    if not data:
        # Guard: data[0] below would raise IndexError on an empty string.
        return False
    # common case: string starts with a lowercase letter
    starts_lower = data[0].islower()
    # special case: starts with a digit but is not entirely numeric
    starts_digit = data[0].isnumeric() and not data.isnumeric()
    return (starts_lower or starts_digit) and '@' not in data
from re import compile
def check_regex(regex):
    """Return a predicate that tests whether a text matches *regex*.

    The pattern is compiled once up front so repeated calls are cheap.
    """
    pattern = compile(regex)
    def _check_regex(text):
        return pattern.match(text) is not None
    return _check_regex
def cast_python_object_to_aggregate(obj, aggregate):
    """ This function casts a python object to an aggregate type. For instance:
    [1.,2.,3.]-> ARRAY(1,3,REAL)

    Copies list elements into the aggregate's index range [bound_1, bound_2]
    (inclusive). NOTE: only list inputs are handled; any other type falls
    through and the function implicitly returns None.
    """
    # Aggregate index range is defined by the aggregate itself, not the list.
    aggregate_lower_bound = aggregate.bound_1()
    aggregate_upper_bound = aggregate.bound_2()
    if type(obj)==list:
        # Shift list indices so obj[0] lands at the aggregate's lower bound.
        for idx in range(aggregate_lower_bound,aggregate_upper_bound+1):
            aggregate[idx] = obj[idx-aggregate_lower_bound]
        return aggregate | 75563a8f1d359d97214066fe8fef32da377bcb23 | 240,850 |
def katex_rendering_delimiters(app):
    """Build the KaTeX auto-render delimiter option string.

    Returns '' when the user already supplied 'delimiters' in katex_options;
    otherwise formats the katex_inline / katex_display pairs (with
    backslashes escaped for JavaScript). See
    https://khan.github.io/KaTeX/docs/autorender.html
    """
    # User-defined delimiters take precedence: emit nothing.
    if 'delimiters' in app.config.katex_options:
        return ''
    def escape(pairs):
        return [d.replace('\\', '\\\\') for d in pairs]
    katex_delimiters = {
        'inline': escape(app.config.katex_inline),
        'display': escape(app.config.katex_display),
    }
    return r'''delimiters: [
  {{ left: "{inline[0]}", right: "{inline[1]}", display: false }},
  {{ left: "{display[0]}", right: "{display[1]}", display: true }}
  ]'''.format(**katex_delimiters)
def list_del_indices(mylist, indices):
    """Remove the elements of *mylist* at the given *indices*, in place.

    Indices are deleted from highest to lowest so earlier deletions do not
    shift the positions of later ones.

    Parameters
    ----------
    mylist : list
        the list of elements of interest
    indices : list
        the list of indices of elements that should be removed
    Returns
    -------
    list
        the reduced mylist entry
    """
    for idx in sorted(indices, reverse=True):
        mylist.pop(idx)
    return mylist
def valid_day(tms):
    """Return 1 if the day of month in *tms* (year, month, day, ...) is valid, else 0.

    Fixes an operator-precedence bug in the original February check
    (``year%4 == 0 and day > 29 or day > 28``), which rejected Feb 29 even
    in leap years, and applies the full Gregorian leap rule (century years
    are leap only when divisible by 400).
    """
    year, month, day = tms[:3]
    if day > 31:
        return 0
    if month in [4, 6, 9, 11] and day > 30:
        return 0
    if month == 2:
        leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        if day > (29 if leap else 28):
            return 0
    return 1
def handles(cursor):
    """Return a dict mapping handle ROWID -> id fetched from the handle table."""
    rows = cursor.execute("SELECT ROWID, id FROM handle").fetchall()
    return {rowid: contact_id for rowid, contact_id in rows}
def get_object_location(object):
    """Return the location of *object* as a plain list.

    Parameters:
        object (obj): Object exposing a ``location`` attribute.
    Returns:
        list: Object location components.
    """
    return [coord for coord in object.location]
def getAccuracy(l1, l2):
    """Return the fraction of positions where the equal-length lists l1 and l2 agree."""
    assert(len(l1) == len(l2))
    matches = sum(a == b for a, b in zip(l1, l2))
    return matches / float(len(l1))
def calculate_accuracy(y_target, y_pred):
    """Return classification accuracy between two label arrays.

    # Arguments
        y_target: true label data (numpy-style array)
        y_pred: estimated label data
    # Returns
        accuracy as a float in [0, 1]
    """
    correct = y_target[y_target == y_pred].size
    return correct * 1.0 / y_target.size
def score2voice(score):
    """ Generates a sequence of voice numbers from a score.
    Args:
        score (music21.Score): the input score
    Returns:
        list[int]: a list of voice numbers corresponding to each note in the score
    """
    # One entry per pitch (chords contribute one entry per chord member).
    # The voice number is parsed from the Voice context's repr: assumes it
    # ends like '... N>' so index -2 is the voice digit — TODO confirm for
    # voice numbers >= 10. Notes outside any Voice default to voice 1.
    return [
        int(str(n.getContextByClass("Voice"))[-2])
        if not n.getContextByClass("Voice") is None
        else 1
        for n in score.flat.notes
        for p in n.pitches
    ] | 89aae5df1f43042e344e6c78af2cd68ab0feddb6 | 466,901 |
import configparser
def _copy_ini(config: configparser.ConfigParser, section: str, option: str) -> str:
    """Return an 'option=value\\n' line copying *option* from the old config.

    Yields the empty string when the option is absent.

    Args:
        config: Old configuration.
        section: Section to copy from.
        option: Option to copy.
    """
    if not config.has_option(section, option):
        return ''
    return '{}={}\n'.format(option, config.get(section, option))
def to_numeric(s):
    """Convert string *s* to int when possible, otherwise to float.

    Args:
        s (str): string to be converted to numeric
    Returns:
        int or float (float's ValueError propagates for non-numeric input)
    """
    try:
        number = int(s)
    except ValueError:
        number = float(s)
    return number
import requests
def check_url_response(url: str) -> bool:
    """Return True when an HTTP GET of *url* succeeds (status < 400).

    Uses a 5-second timeout; connection errors propagate to the caller.

    Args:
        url: url to a site
    Returns:
        True if site is online, False otherwise
    """
    with requests.get(url, timeout=5) as response:
        return response.ok
def a_function(x=0):
    """Return a greeting for *x* (regular docstring, no markdoc conflict)."""
    return 'Hello {}'.format(x)
import requests
def upload_photo(account, f, group):
    """
    Uploads a photo to a group's wall. Details: https://vk.com/dev/upload_files
    :param account: vk.API object on whose behalf the photo is uploaded
    :param f: file-like object containing the photo
    :param group: group id
    :return: photo object per https://vk.com/dev/objects/photo, contained in a dict
    """
    # Three-step VK upload: get an upload URL, POST the file, then save it.
    url = account.photos.getWallUploadServer(group_id=abs(group))['upload_url']
    r = requests.post(url, files={'photo': ("photo.jpeg", f)}).json()
    # saveWallPhoto returns a list; the single uploaded photo is element 0.
    s = account.photos.saveWallPhoto(group_id=abs(group),
                                     photo=r['photo'],
                                     server=r['server'], hash=r['hash'])[0]
    return s | 4529c6c4e38a55af192f2149e92f49e277eb1bd6 | 131,219 |
def vhost_scsi_controller_add_target(client, ctrlr, scsi_target_num, bdev_name):
    """Add a LUN to a vhost SCSI controller target via the RPC client.

    Args:
        ctrlr: controller name
        scsi_target_num: target number to use
        bdev_name: name of bdev to add to target
    """
    return client.call('vhost_scsi_controller_add_target', {
        'ctrlr': ctrlr,
        'scsi_target_num': scsi_target_num,
        'bdev_name': bdev_name,
    })
def GetPlistValue(plist, value):
    """Return plist[value], or False when the key is absent.

    NOTE: False is also a legitimate stored value, so callers cannot
    distinguish "missing" from "stored False" with this API.
    """
    # EAFP: a single lookup; only a missing key maps to False.
    try:
        return plist[value]
    except KeyError:
        return False
import linecache
def checkline(filename, lineno, ui):
    """Return line number of first line at or after input
    argument such that if the input points to a 'def', the
    returned line number is the first
    non-blank/non-comment line to follow. If the input
    points to a blank or comment line, return 0. At end
    of file, also return 0."""
    line = linecache.getline(filename, lineno)
    if not line:
        ui.print('*** End of file')
        return 0
    line = line.strip()
    # Don't allow setting breakpoint at a blank line
    if (not line or (line[0] == '#') or (line[:3] == '"""') or line[:3] == "'''"):
        ui.print('*** Blank or comment')
        return 0
    # When a file is read in and a breakpoint is at
    # the 'def' statement, the system stops there at
    # code parse time. We don't want that, so all breakpoints
    # set at 'def' statements are moved one line onward
    if line[:3] == 'def':
        # Hand-rolled scanner: tracks whether we are inside a string
        # (instr holds the open quote char) and the bracket nesting depth,
        # so the 'def' header can span multiple lines.
        instr = ''
        brackets = 0
        while 1:
            skipone = 0
            for c in line:
                if instr:
                    # Inside a string: honor backslash escapes, close on
                    # the matching quote character.
                    if skipone:
                        skipone = 0
                    elif c == '\\':
                        skipone = 1
                    elif c == instr:
                        instr = ''
                elif c == '#':
                    break
                elif c in ('"', "'"):
                    instr = c
                elif c in ('(', '{', '['):
                    brackets = brackets + 1
                elif c in (')', '}', ']'):
                    brackets = brackets - 1
            lineno = lineno + 1
            line = linecache.getline(filename, lineno)
            if not line:
                ui.print('*** end of file')
                return 0
            line = line.strip()
            if not line:
                continue   # Blank line
            # Stop at the first code line once the header's brackets are
            # balanced and the line isn't a comment or bare string.
            if brackets <= 0 and line[0] not in ('#', '"', "'"):
                break
    return lineno | acdcc76a91e7882a0a836e2783847e23deb1f84b | 659,623 |
def create_req_and_opt_graph(req_comp_g_contracted, complete_g, circuit_rpp, GranularConnector_EdgeList):
    """
    Creates a graph with required and optional edges delineated for visualization
    Args:
        req_comp_g_contracted (NetworkX MultiDiGraph): required component graph containing contracted edges
            generated by initialize_rpp.InnerAndOuterToEdgeListFile
        complete_g (NetworkX MultiDiGraph): complete street network graph
            generated by initialize_rpp.InnerAndOuterToEdgeListFile
        circuit_rpp (list): rpp circuit generated by postman_problems.solver.rpp and
            edited by circuit_path_string_to_int
    Returns:
        final_graph (NetworkX DiGraph): graph of route with optional and required edges delineated
        print statements with required and optional edge breakdown
    """
    final_graph = req_comp_g_contracted.copy()
    # Counters and de-duplication lists for each edge category.
    unexpanded_edges = 0
    unexpanded_edges_list = []
    granular_connector_edges = 0
    granular_connector_edges_list = []
    granular_req_edges = 0
    granular_req_edges_list = []
    optional_edges = 0
    optional_edges_list = []
    for e in circuit_rpp:
        if [e[0], e[1]] not in unexpanded_edges_list:
            unexpanded_edges+=1
            unexpanded_edges_list+=[[e[0], e[1]]]
        # add granular optional edges to final_graph
        # Each contracted circuit edge carries its granular node path in
        # e[3]['path']; walk it as consecutive node pairs.
        path = e[3]['path']
        for pair in list(zip(path[:-1], path[1:])):
            # Pair already exists in the required graph: just annotate it.
            if (req_comp_g_contracted.has_edge(pair[0], pair[1])):
                edge = req_comp_g_contracted[pair[0]][pair[1]][0]
                if [pair[0], pair[1]] in GranularConnector_EdgeList:
                    final_graph[pair[0]][pair[1]][0]['granular_type'] = 'req street and connector'
                    if [pair[0], pair[1]] not in granular_connector_edges_list:
                        granular_connector_edges+=1
                        granular_connector_edges_list+=[[pair[0], pair[1]]]
                # Parallel edge (key 1) counts as a connector traversal too.
                elif 1 in req_comp_g_contracted[pair[0]][pair[1]]:
                    granular_connector_edges+=1
                else:
                    if [pair[0], pair[1]] not in granular_req_edges_list:
                        final_graph[pair[0]][pair[1]][0]['granular_type'] = 'req street'
                        granular_req_edges+=1
                        granular_req_edges_list+=[[pair[0], pair[1]]]
            # Pair is new: add it as a granular connector or optional edge.
            else:
                if [pair[0], pair[1]] in GranularConnector_EdgeList:
                    if [pair[0], pair[1]] not in granular_connector_edges_list:
                        final_graph.add_edge(pair[0], pair[1], granular=True, granular_type='connector')
                        granular_connector_edges+=1
                        granular_connector_edges_list+=[[pair[0], pair[1]]]
                elif [pair[0], pair[1]] not in optional_edges_list:
                    final_graph.add_edge(pair[0], pair[1], granular=True, granular_type='optional')
                    optional_edges+=1
                    optional_edges_list+=[[pair[0], pair[1]]]
                else:
                    print(pair)
        # Copy node coordinates from the complete street graph for plotting.
        for n in path:
            final_graph.add_node(n, y=complete_g.nodes[n]['y'], x=complete_g.nodes[n]['x'])
    print('Edges in Circuit')
    print('\tTotal Unexpanded Edges: {}'.format(unexpanded_edges))
    print('\tTotal Edges (All Contracted Edges Granularized): {}'.format(granular_connector_edges+granular_req_edges+optional_edges))
    print('\t\tGranular Connector Edges: {}'.format(granular_connector_edges))
    print('\t\tGranular Required Edges: {}'.format(granular_req_edges))
    print('\t\tGranular Optional Edges: {}'.format(optional_edges))
    return final_graph | 991ba857418d49ef7ce29cde2ad1fb45826951b8 | 455,031 |
import hashlib
def hash_file(fpath, block_size=2**16):
    """
    Get the SHA1 hash of a file, reading it in chunks.
    Args:
        fpath (str): Path to file.
        block_size (int): Number of bytes to read from the file at a time.
    Returns:
        str: SHA1 digest as a hex string.
    """
    digest = hashlib.sha1()
    with open(fpath, "rb") as stream:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: stream.read(block_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def make_grouped_share(df, variable, name="share"):
    """Add column *name* holding *variable* normalised to sum to 1 (mutates df)."""
    total = df[variable].sum()
    df[name] = df[variable] / total
    return df
import logging
def create_caplog(caplog):
    """Return a helper that sets the 'aiocrontab.core' logger level on *caplog*."""
    def _level(level=logging.INFO):
        caplog.set_level(level, logger="aiocrontab.core")
        return caplog
    return _level
from datetime import datetime
def datetime_range_inclusive(min, max, format):
    """
    Return a value-check function that raises ValueError when its argument,
    parsed as a datetime with *format*, falls outside the inclusive range
    [min, max].
    """
    lower = datetime.strptime(min, format)
    upper = datetime.strptime(max, format)
    def checker(v):
        parsed = datetime.strptime(v, format)
        if not (lower <= parsed <= upper):
            raise ValueError(v)
    return checker
from typing import Counter
def parse(filename):
    """Parse an allergen input file.

    Input lines look like: ``sdfsjdfk sjdkf lfke ljrl (contains dairy, soy)``

    Parameters:
        filename (str): the name of the input file to open
    Returns:
        candidates (Dict[str, Set[str]]): for each allergen, the set of
            ingredients that could contain it (intersection across lines).
        counts (Counter): how many lines each ingredient appears in.
    """
    candidates = dict()
    counts = Counter()
    with open(filename, "r") as handle:
        for line in handle:
            ingredient_part, _contains, allergen_part = line.partition(" (contains ")
            ingredients = set(ingredient_part.split())
            counts.update(ingredients)
            for allergen in allergen_part.strip(")\n").split(", "):
                if allergen in candidates:
                    # Narrow to ingredients present on every relevant line.
                    candidates[allergen] &= ingredients
                else:
                    candidates[allergen] = set(ingredients)
    return candidates, counts
def font_style(keyword):
    """``font-style`` descriptor validation: True for the three CSS keywords."""
    return keyword in {'normal', 'italic', 'oblique'}
def _allnan_or_nonan(df, column: str) -> bool:
    """Return False if *column* is entirely NaN, True if it has no NaNs.

    Raises
    ------
    ValueError
        When the column mixes NaN and non-NaN values.
    """
    nulls = df[column].isnull()
    if nulls.all():
        return False
    if nulls.any():
        raise ValueError(
            f'The data in the {column} column should either be all NaN or there should be no NaNs'
        )
    return True
def num2lett(ind):
    """Return the lowercase letter at alphabet position *ind* (0 -> 'a', 25 -> 'z')."""
    return chr(97 + ind)
import random
import string
def random_str(length: int = 8):
    """Generate a random string of ASCII letters with the given length."""
    alphabet = string.ascii_letters
    return ''.join(random.choice(alphabet) for _ in range(length))
from datetime import datetime
def now_str(time=False):
    """Return a timestamp string: date only, or date_time when *time* is True."""
    pattern = "%Y-%m-%d_%H:%M:%S" if time else "%Y-%m-%d"
    return datetime.now().strftime(pattern)
def path_macro_sub(s, ip='', dp='', gp='', ei=''):
    """
    Replace path macros in *s* with the supplied values:
      - <INSTALL_DIR> -> ip
      - <DATA_DIR>    -> dp
      - <GLOBAL_DATA> -> gp
      - <EVENT_ID>    -> ei
    e.g., path_macro_sub("<INSTALL_DIR>/<DATA_DIR>", "hello", "world")
    returns "hello/world". Missing macros in *s* are not an error.

    Args:
        s (str): The string into which the replacements are made.
        ip (str): Replacement for <INSTALL_DIR>.
        dp (str): Replacement for <DATA_DIR>.
        gp (str): Replacement for <GLOBAL_DATA>.
        ei (str): Replacement for <EVENT_ID>.
    Returns:
        str: A new string with the sub-string replacements.
    """
    substitutions = (
        ('<INSTALL_DIR>', ip),
        ('<DATA_DIR>', dp),
        ('<GLOBAL_DATA>', gp),
        ('<EVENT_ID>', ei),
    )
    for macro, replacement in substitutions:
        s = s.replace(macro, replacement)
    return s
def get_bb_ops(bb, ops):
    """Return the instructions from *ops* inside basic block *bb*, sorted by offset."""
    start = bb['addr']
    end = start + bb['size']
    inside = (op for addr, op in ops.items() if start <= addr < end)
    return sorted(inside, key=lambda instr: instr['offset'])
def get_transitions(state_list, xml_transition_list):
    """Return the set of transitions whose source or destination is in *state_list*."""
    matched = set()
    for state in state_list:
        for transition in xml_transition_list:
            if state.id in (transition.source, transition.destination):
                matched.add(transition)
    return matched
def is_pull_request(issue):
    """Return True when *issue* carries a 'pull_request_url' key, i.e. is a PR."""
    return 'pull_request_url' in issue
import torch
def dump_ir(tensors, ir_format):
    """Return a dump of *tensors* in the requested IR format.

    Valid formats:
      - 'text': LTC IR
      - 'backend': the active backend IR
    Raises RuntimeError for any other format.
    """
    if ir_format == "text":
        return torch._C._lazy._get_tensors_text(tensors)
    if ir_format == "backend":
        return torch._C._lazy._get_tensors_backend(tensors)
    raise RuntimeError(f"Unrecognized IR format: {ir_format}")
def to_binary(val, expected_length=8):
    """Convert a decimal value to a zero-padded binary string.

    :param val: decimal
    :param expected_length: minimum length of the result
    :return: binary value (no '0b' prefix), left-padded with '0'
    """
    bits = bin(val)[2:]
    return bits.zfill(expected_length)
def dansRectangle(x, y, cx, cy, L, H):
    """
    Test whether a point lies inside a rectangle.

    Parameters:
        (x, y)   --> point to test,
        (cx, cy) --> top-left corner of the rectangle,
        L        --> rectangle width,
        H        --> rectangle height.
    Returns True if the point is inside the rectangle, False otherwise
    (right and bottom edges are exclusive).
    """
    inside_x = cx <= x < cx + L
    inside_y = cy <= y < cy + H
    return inside_x and inside_y
def int_ceil(x):
    """Return the ceiling of *x* as an int (truncate, then bump if truncation lost)."""
    truncated = int(x)
    return truncated + 1 if truncated < x else truncated
def unflatten(dictionary):
    """Unflatten a dictionary by splitting its keys at '.'s.

    Converts a flat mapping like ``{'a.b': 1}`` into a nested one like
    ``{'a': {'b': 1}}``. Used internally for converting the configuration
    dictionary to more convenient formats.

    Parameters
    ----------
    dictionary : dict
        The flat dictionary to be unflattened.
    Returns
    -------
    dict
        The unflattened dictionary.
    See Also
    --------
    lfads_tf2.utils.flatten : Performs the opposite of this operation.
    """
    nested = dict()
    for flat_key, value in dictionary.items():
        *parents, leaf = flat_key.split(".")
        node = nested
        for part in parents:
            # Create intermediate dicts on demand, then descend.
            node = node.setdefault(part, dict())
        node[leaf] = value
    return nested
import math
def angle_dev(a, b):
    """
    Compute the minimum signed difference between two radian angles.

    Parameters
    ----------
    a : float
        angle A
    b : float
        angle B
    Returns
    -------
    float
        signed difference between A and B, wrapped into (-pi, pi]
    """
    two_pi = math.pi * 2
    wrapped = math.fmod(a - b + math.pi, two_pi)
    return wrapped - math.pi
from typing import List
def _sampling_from_alias_wiki(
    alias: List[int],
    probs: List[float],
    random_val: float,
) -> int:
    """
    Draw a sample from a non-uniform discrete distribution via Alias sampling,
    using a single random number (the Wikipedia formulation).

    :param alias: the alias list in range [0, n)
    :param probs: the pseudo-probability table
    :param random_val: a random floating point number in the range [0.0, 1.0)
    Return the picked index in the neighbor list as next node in the random
    walk path.
    """
    table_size = len(alias)
    # Integer part selects the bucket; fractional part decides alias vs direct.
    bucket = int(table_size * random_val)
    fractional = table_size * random_val - bucket
    return bucket if fractional < probs[bucket] else alias[bucket]
def get_triangle_top_midpoint(point_list):
    """Return (x, y) of the midpoint of the triangle's top edge, any orientation.

    y is the smallest y among the points; x is the horizontal midpoint.
    """
    xs = [pt[0] for pt in point_list]
    ys = [pt[1] for pt in point_list]
    return int((min(xs) + max(xs)) / 2), int(min(ys))
def write_flaggelin_db_fasta(df, fasta_name = "flagellin_db.fasta"):
    """
    Write a FASTA file with one record per DataFrame row.

    Parameters
    ----------
    df : Pandas DataFrame
        Must provide 'Accession' (record header) and 'seq' (sequence) columns,
        as output by construct_reference_flagellin_dataframe.
    fasta_name : str
        Name of the output fasta file.
    Returns
    -------
    fasta_name : str

    Note
    ----
    Writes the file relative to the working directory unless a path is given.
    """
    with open(fasta_name, 'w') as handle:
        for _idx, row in df.iterrows():
            handle.write(f">{row['Accession']}\n{row['seq']}\n")
    return fasta_name
def assign_coordinate_file_ending(program: str):
    """
    Return the coordinate-file extension used by *program*.

    Parameters
    ----------
    program: str
        Program being used ('Tinker', 'Test', 'CP2K' or 'QE').

    Returns
    -------
    file_ending: str or None
        The file extension, or None for an unrecognized program (the
        original if/elif chain fell off the end the same way — now explicit).
    """
    endings = {
        'Tinker': '.xyz',
        'Test': '.npy',
        'CP2K': '.pdb',
        'QE': '.pw',
    }
    return endings.get(program)
def isotropic_rp(**kwargs):
    """Isotropic radiation pattern: constant 1.0 regardless of arguments.

    :return: 1.0
    """
    return 1.0
def FlavorName(flavor):
    """Return a string name for *flavor* (first element when given a tuple)."""
    if isinstance(flavor, tuple):
        return flavor[0]
    return flavor
def remaining_G9_12_cap(k12):
    """Return the school's remaining grade 9-12 enrollment capacity, floored at 0."""
    spare = k12['G9_12_cap'] - k12['G9_12']
    return spare.clip(0)
def find_flank_zerox(sig, flank):
    """Find zero-crossings on rising or decaying flanks of a filtered signal.

    Parameters
    ----------
    sig : 1d array
        Time series to detect zero-crossings in.
    flank : {'rise', 'decay'}
        Which flank, rise or decay, to use to get zero crossings.
    Returns
    -------
    zero_xs : 1d array
        Samples of the zero crossings (a one-element list with the signal
        midpoint when no crossing exists, e.g. peak and trough at the same
        voltage).
    """
    assert flank in ['rise', 'decay']
    # For 'rise' mark samples at/below zero; a crossing is where a marked
    # sample is followed by an unmarked one. 'decay' uses the mirror mask.
    marked = sig <= 0 if flank == 'rise' else sig > 0
    zero_xs = (marked[:-1] & ~marked[1:]).nonzero()[0]
    if len(zero_xs) == 0:
        return [int(len(sig) / 2)]
    return zero_xs
async def callback(app, message):
    """Print and echo back the message retrieved from the file consumer."""
    print(app.name, "received:", message)
    return message
def _help_body_atom(body: str) -> str:
    """Escape a snippet body for Atom: backslashes first, then double quotes."""
    # Order matters: escaping '"' first would double-escape its backslash.
    return body.replace('\\', '\\\\\\\\').replace('"', '\\"')
def num_grid_points(density_file_name):
    """
    Extract the number of grid points in each cartesian direction from a .dx file.

    The counts are whitespace-separated fields 5-7 of the file's first line.
    The file is now opened with a context manager (the original leaked the
    handle) and only the first line is read instead of the whole file.

    Parameters
    ----------
    density_file_name : string
        Full path of the .dx file from which to get data.

    Returns
    -------
    num_x : int
        Number of points in the x-direction.
    num_y : int
        Number of points in the y-direction.
    num_z : int
        Number of points in the z-direction.
    """
    with open(density_file_name, mode='r') as file_handle:
        size_line = file_handle.readline()
    data = size_line.split()
    return int(data[5]), int(data[6]), int(data[7])
def unqualify(name: str) -> str:
    """Return the unqualified tail of a dotted module/package name."""
    return name.rpartition(".")[2]
def is_related_field(field):
    """
    Return True when *field* was created as a related field from another model
    (detected by the presence of a 'related_name' attribute).
    """
    return hasattr(field, 'related_name')
import logging
def _make_logger(name, /, level='error'):
    """
    Return a named logger with a stream handler and the given level.

    *level* is a case-insensitive logging level name ('error', 'info', ...).
    See: https://stackoverflow.com/q/43109355/4970632
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(name)s (%(levelname)s): %(message)s')
    )
    log = logging.getLogger(name)
    log.addHandler(stream_handler)
    # Resolve e.g. 'info' -> logging.INFO via attribute lookup.
    log.setLevel(getattr(logging, level.upper()))
    return log
def weeds_prec(x_row, y_row):
    """
    WeedsPrec(x -> y): the fraction of x's total context weight that lies in
    contexts shared with y.

    WeedsPrec(x -> y) = (sum over c in Fx,y of w_x(c)) / (sum over c in Fx of w_x(c))
    where Fx,y are the mutual (non-zero) contexts of x and y in the ppmi matrix
    and w_x(c) = ppmi(x, c).

    :param x_row: x's row in the co-occurrence matrix
    :param y_row: y's row in the co-occurrence matrix
    :return: the WeedsPrec score, or 0.0 when x has no context weight
    """
    # Binarize y in place: the dot product with x then sums ppmi(x, c) exactly
    # over the mutual contexts (entries that are zero in either row add 0).
    y_row.to_ones()
    shared_weight = x_row.multiply(y_row).sum()
    total_weight = x_row.sum()
    if total_weight == 0:
        return 0.0
    return shared_weight * (1.0 / total_weight)
from typing import Iterable
def wherevarinlist(var: str, vals: Iterable[str]) -> str:
    """Build a SQL ``IN`` predicate for ``var`` over single-quoted values.

    >>> wherevarinlist("STATE", ["FL", "GA", "SC", "NC"])
    "STATE In ('FL', 'GA', 'SC', 'NC')"
    """
    quoted = ", ".join(f"'{val}'" for val in vals)
    return f"{var} In ({quoted})"
def scale_image(x):
    """Shift and rescale an image array into [0, 1] for visualization purposes."""
    shifted = x - x.min()
    peak = shifted.max()
    if peak > 0:
        return shifted / peak
    return shifted
import json
def erl_tuple_to_list(raw_data):
    """
    Convert serialised Erlang tuple data into Python lists for testing.

    ``raw_data`` is a string of Erlang tuples, e.g.
    ``'[{"giver1","recipient1"},{"giver2","recipient2"}]'``; the result is a
    list of lists, e.g. ``[['giver1','recipient1'], ['giver2','recipient2']]``.
    """
    # Rewrite Erlang tuple braces (and binary-string markers) as JSON list
    # syntax, then deserialise.
    for erlang_token, json_token in (('{', '['), ('}', ']'), ('<<', ''), ('>>', '')):
        raw_data = raw_data.replace(erlang_token, json_token)
    return json.loads(raw_data)
import math
def f2E(f, e):
    """
    Map a true anomaly angle to the eccentric anomaly angle.

    Valid for circular or non-rectilinear elliptic orbits only.

    Inputs:
        f = true anomaly angle (rad)
        e = eccentricity (0 <= e < 1)
    Outputs:
        eccentric anomaly (rad)
    Raises:
        ValueError if e is outside [0, 1).
    """
    if not (0.0 <= e < 1.0):
        raise ValueError('Error: f2E() received e = {}, the value of e should be 0 <= e < 1'.format(str(e)))
    half_f = f / 2.0
    return 2.0 * math.atan2(math.sqrt(1.0 - e) * math.sin(half_f),
                            math.sqrt(1.0 + e) * math.cos(half_f))
def get_timestamp(row):
    """Return the timestamp stored at index 1 of an annotation ``row``."""
    timestamp = row[1]
    return timestamp
def multiproduct(seq=(), start=1):
    """
    Return ``start`` times the product of a sequence of factors with
    multiplicities. ``seq`` may be (factor, exponent) pairs or a dict of
    such pairs.

    >>> multiproduct({3:7, 2:5}, 4)  # = 3**7 * 2**5 * 4
    279936
    """
    if not seq:
        return start
    if isinstance(seq, dict):
        seq = iter(seq.items())
    # Exponentiation by simultaneous squaring: peel off the odd bits into the
    # running product, halve all exponents, recurse, and square the result.
    running = start
    halved = []
    for factor, power in seq:
        if not power:
            continue
        if power == 1:
            running *= factor
            continue
        if power % 2:
            running *= factor
        halved.append((factor, power // 2))
    return running * multiproduct(halved) ** 2
def format_card(cards):
    """
    Convert 2-letter card codes into dicts with rank, color, and suit.

    Args:
        cards: A list of 2-letter strings (rank then suit, e.g. ``'TC'``).

    Returns:
        A list of dicts; 'T' becomes rank '10', and unknown suit letters
        yield a dict with only the 'rank' key.
    """
    # Suit letter -> (color, suit-name) lookup table.
    suit_info = {
        'C': ('black', 'clubs'),
        'D': ('red', 'diams'),
        'H': ('red', 'hearts'),
        'S': ('black', 'spades'),
    }
    formatted = []
    for card in cards:
        entry = {'rank': '10' if card[0] == 'T' else card[0]}
        if card[1] in suit_info:
            entry['color'], entry['suit'] = suit_info[card[1]]
        formatted.append(entry)
    return formatted
from typing import Optional
import math
def _phantom_float_format(
val: float, length: Optional[int] = None, justify: Optional[str] = None
):
"""Float to Phantom style float string.
Parameters
----------
val : float
The value to convert.
length : int
A string length for the return value.
justify : str
Justify text left or right by padding based on length.
Returns
-------
str
The float as formatted str.
"""
if math.isclose(abs(val), 0, abs_tol=1e-50):
string = '0.000'
elif abs(val) < 0.001:
string = f'{val:.3e}'
elif abs(val) < 1000:
string = f'{val:.3f}'
elif abs(val) < 10000:
string = f'{val:g}'
else:
string = f'{val:.3e}'
if isinstance(length, int):
if justify is None:
justify = 'left'
else:
if justify.lower() in ['r', 'right']:
return string.rjust(length)
elif justify.lower() in ['l', 'left']:
return string.ljust(length)
else:
raise ValueError('justify is either "left" or "right"')
else:
raise TypeError('length must be int')
return string | cd8b7fd3e9f6a38c72a61a3403f82485eee6514f | 248,668 |
def _decode_node(s):
"""Map string `s` to node-like integer."""
if s == 'F':
return -1
elif s == 'T':
return 1
else:
return int(s) | b7c8f80d523e35201d306a5b21f4f4c9f1471e91 | 304,033 |
def t01_SimpleGetPut(C, pks, crypto, server):
    """Uploads a single file as "alice" and scores 1.0 if the download round-trips."""
    client = C("alice")
    client.upload("a", "b")
    roundtrip_ok = client.download("a") == "b"
    return float(roundtrip_ok)
def _key_in_string(string, string_formatting_dict):
"""Checks which formatting keys are present in a given string"""
key_in_string = False
if isinstance(string, str):
for key, value in string_formatting_dict.items():
if "{" + key + "}" in string:
key_in_string = True
return key_in_string | 706beaa06973b5071ecdbf255be83ee505202668 | 677,773 |
def param_invalid_value_info(param_name, default_value):
    """
    Build a warning message about an invalid hyper-parameter configuration.
    """
    return (f"Parameter warning: the configuration of hyper-parameter "
            f"'{param_name}' is not valid, will use default value '{default_value}'")
async def toggle_group(role_title, ctx):
    """Toggle ``role_title`` on the invoking user: add the role if they lack it,
    remove it if they have it. Message handling is left to the caller for
    customizability.

    Returns one of four strings:
      'whisper'        - invoked from a DM (no guild), so roles cannot apply.
      'role not found' - no role with that (case-insensitive) name on the guild.
      'removed'        - the role was removed from the user.
      'added'          - the role was added to the user.
    """
    if ctx.guild is None:
        return 'whisper'
    server_roles = ctx.guild.roles
    user_roles = ctx.author.roles
    role_id = ""
    found_role = False
    role_id_index = ''
    for role in server_roles:
        # Case-insensitive match so users need not type the exact role name.
        if role.name.lower() == role_title.lower():
            role_id = role
            found_role = True
            try:
                role_id_index = user_roles.index(role)
            except ValueError:
                # Author does not currently hold this role; index stays unset.
                pass
    if not found_role:
        return "role not found"
    if role_id in user_roles:
        # User has this role: drop it and push the updated list to the API.
        user_roles.pop(role_id_index)
        await ctx.author.edit(roles=user_roles, reason="Automated role removal requested by user")
        return "removed"
    # User does not have this role: append and push.
    user_roles.append(role_id)
    await ctx.author.edit(roles=user_roles, reason="Automated role add requested by user")
    return "added"
def deltaify_traces(traces, final_byte_duration=9999):
    """Convert absolute start times in traces to durations.

    Each (start_time, byte) pair becomes (duration, byte), where the duration
    is the gap to the next byte's start time. The last byte's duration cannot
    be computed and is assigned ``final_byte_duration``.

    Args:
      traces: Traces to "deltaify" as described.
      final_byte_duration: Duration to assign to the final byte.

    Returns:
      "Deltaified" traces as described.
    """
    deltaified = [
        (next_start - start, byte)
        for (start, byte), (next_start, _) in zip(traces, traces[1:])
    ]
    deltaified.append((final_byte_duration, traces[-1][1]))
    return deltaified
def in2pt(inval=1):
    """Convert inches to points (1 in = 72 pt)."""
    points_per_inch = 72.0
    return points_per_inch * float(inval)
def MakeGoogleUniqueID(cloud_instance):
    """Build the google unique ID as ``<short zone>/<project_id>/<instance_id>``.

    The zone is reduced to its final path component. Raises ValueError when
    any of the three fields is missing/empty.
    """
    zone = cloud_instance.zone
    project = cloud_instance.project_id
    instance = cloud_instance.instance_id
    if not (zone and project and instance):
        raise ValueError("Bad zone/project_id/id: '%s/%s/%s'" %
                         (zone, project, instance))
    short_zone = zone.split("/")[-1]
    return "/".join([short_zone, project, instance])
import ipaddress
def isOverlapCIDR(cidrA, cidrB):
    """
    Return True when IPv4 CIDR block A overlaps IPv4 CIDR block B.
    """
    network_a = ipaddress.IPv4Network(cidrA)
    network_b = ipaddress.IPv4Network(cidrB)
    return network_a.overlaps(network_b)
def is_root(dmrs, nodeid):
    """
    A node is a root when it has no incoming links in ``dmrs``.
    """
    for _ in dmrs.get_in(nodeid, itr=True):
        return False
    return True
from datetime import datetime
def to_epoch(dt: datetime) -> float:
    """Return seconds since the 1970-01-01 epoch for ``dt``.

    NOTE(review): the tzinfo is stripped, not converted — an aware datetime is
    treated as if its wall-clock time were UTC; confirm callers expect that.

    Args:
        dt: Datetime object

    Returns:
        seconds since epoch for dt
    """
    naive = dt.replace(tzinfo=None)
    delta = naive - datetime(1970, 1, 1)
    return delta.total_seconds()
def _aggregate_score_dicts(scores , name = None):
""" Aggregate a list of dict to a dict of lists.
Parameters
----------
scores : list of dictionaries
Contains a dictionary of scores for each fold.
name : str, optional
Prefix for the keys. The default is None.
Returns
-------
dict of lists
Example
-------
scores = [{'roc_auc' : 0.78 , 'accuracy' : 0.8} , {'roc_auc' : 0.675 , 'accuracy' : 0.56} , {'roc_auc' : 0.8 , 'accuracy' : 0.72 }]
_aggregate_score_dicts(scores) = {'roc_auc' : [0.78 , 0.675 , 0.8] , 'accuracy' : [0.8 , 0.56 , 0.72]}
"""
if name is None :
return {key: [score[key] for score in scores] for key in scores[0]}
else :
return {name + str('_') + key: [score[key] for score in scores] for key in scores[0]} | cde1662990249573677e122ad107485c251bb297 | 308,751 |
def combine_annotations(annotations):
    """Flatten the 'ents' lists of several Saber annotations into a single list.

    Args:
        annotations (list): annotations returned by a Saber model, each a dict
            with an 'ents' list.

    Returns:
        one list containing every entity from every annotation, in order.
    """
    return [ent for ann in annotations for ent in ann['ents']]
def _compile_model(model):
""" Compile the CNN """
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model | a2a0e3fd32584fdb50a10ba3312b1fd757e8677c | 401,116 |
import re
def count_ops_in_hlo_proto(hlo_proto,
                           ops_regex):
    """Count instructions in an HLO proto whose names match a regex prefix.

    Args:
      hlo_proto: an HloModuleProto object.
      ops_regex: a string regex; an op counts when re.match succeeds on its name.

    Returns:
      Count of matching ops across all computations.
    """
    matcher = re.compile(ops_regex)  # compile once instead of per instruction
    total = 0
    for computation in hlo_proto.computations:
        total += sum(1 for instr in computation.instructions
                     if matcher.match(instr.name) is not None)
    return total
def archimedes(dp, rhog, rhos, mu):
    """
    Calculate the dimensionless Archimedes number.

    .. math:: Ar = \\frac{dp^3 \\rho_g (\\rho_s - \\rho_g) g}{\\mu^2}

    Parameters
    ----------
    dp : float
        Particle diameter [m]
    rhog : float
        Gas density [kg/m³]
    rhos : float
        Solid density [kg/m³]
    mu : float
        Dynamic viscosity [kg/(m⋅s)]

    Returns
    -------
    ar : float
        Archimedes number [-]

    Example
    -------
    >>> archimedes(0.001, 910, 2500, 0.001307)
    8309.1452

    References
    ----------
    Daizo Kunii and Octave Levenspiel. Fluidization Engineering.
    Butterworth-Heinemann, 2nd edition, 1991.
    """
    g = 9.81  # gravity acceleration [m/s²]
    buoyant_weight = dp**3 * rhog * (rhos - rhog) * g
    return buoyant_weight / (mu**2)
import random
def randomiser(count):
    """
    Return a uniformly random integer in [0, count - 1].
    """
    # randrange(count) is exactly randint(0, count - 1).
    return random.randrange(count)
import time
import calendar
def adjust_time(t, delta):
    """
    Shift a (UTC) struct_time by ``delta`` seconds.

    :type t: struct_time
    :type delta: int
    :param delta: seconds
    :rtype: struct_time
    """
    epoch_seconds = calendar.timegm(t)
    return time.gmtime(epoch_seconds + delta)
def get_between(txt:str, open_char:str="'", close_char:str="'", start:int=1 ):
    """Parse all content in supplied text that appears between the open and close characters, exclusively.
    If txt is empty, or open_char is not found, returns ('', -1, -1). If the close_char is never found,
    returns the txt from the starting positon through the end of the txt.

    When open_char equals close_char (quote-like), a doubled delimiter inside the
    span (e.g. '' inside a quoted string) is treated as an escaped single
    delimiter and collapsed in the returned text. When they differ (paren-like),
    nesting is balanced by depth counting.

    Args:
        txt (str): String text to parse out subset.
        open_char (str, optional): Character defining the opening of the subset. Defaults to "'".
        close_char (str, optional): Character defining the close of the subset. Defaults to "'".
        start (int, optional): Position in txt to start searching. Defaults to 1.

    Returns:
        tuple: (subset:str, starting position of subset:int, ending position of subset:int)
    """
    # sp = index of the opening delimiter; ep will end up one past the closing one.
    sp = txt.find(open_char, start)
    ep = sp+1
    if sp == -1 or open_char=='' or close_char=='': return ('',-1,-1) # if not found, or empty
    if open_char == close_char: # quote like things
        # Scan for a closing quote that is not part of a doubled ("escaped") pair:
        # the char before and after the candidate must both differ from the quote.
        # If no close is found, ep becomes len(txt)+1 and the loop exits.
        while ep <= len(txt):
            ep1 = txt.find(close_char, ep)
            ep = len(txt)+1 if ep1 == -1 else ep1+1
            if txt[ep1:ep1+1] == close_char and close_char not in [txt[ep1-1:ep1], txt[ep1+1:ep1+2]]:
                break
    else: # paren-like things
        # Depth-count nested open/close pairs starting at the opening char;
        # ep advances until the depth returns to zero (the matching close).
        i = 0
        for c in txt[sp:]:
            if c == open_char: i+=1
            if c == close_char: i -=1
            if i == 0: break
            ep +=1
    # Shift both bounds inward so the delimiters themselves are excluded.
    sp +=1
    ep -=1
    # For quote-like spans, collapse doubled delimiters back to a single one.
    return (txt[sp:ep].replace(open_char*2, open_char) if open_char == close_char else txt[sp:ep], sp, ep)
def ij2bl(i, j):
    """
    Convert a pair of antenna numbers to a baseline number.

    The pair is order-independent (i and j are sorted first). Pairs whose
    larger antenna fits below 255 use the compact 256-based encoding;
    otherwise the extended 2048-based encoding with a 65536 offset is used.

    Parameters
    ----------
    i : int
        first antenna number
    j : int
        second antenna number

    Returns
    -------
    int
        baseline number
    """
    low, high = (i, j) if i <= j else (j, i)
    if high + 1 < 256:
        return 256 * (low + 1) + (high + 1)
    return 2048 * (low + 1) + (high + 1) + 65536
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.