content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
import inquirer
def select_user(query_message, mydb):
"""
Prompt the user to select from a list of all database users.
Args:
        query_message - The message to display in the prompt
mydb - A connected MySQL connection
"""
questions = [
inquirer.List('u',
message=query_message,
choices=list_users(mydb)
)
]
return inquirer.prompt(questions)['u'] | 8ab2adb27f73b5581bc48c8cc4cbc2888a21753f | 3,655,921 |
from typing import Optional
import hashlib
def generate_abl_contract_for_lateral_stage(
lateral_stage: LateralProgressionStage,
parent_blinding_xkey: CCoinExtKey,
start_block_num: int,
creditor_control_asset: CreditorAsset,
debtor_control_asset: DebtorAsset,
bitcoin_asset: BitcoinAsset,
first_stage_input_descriptor: Optional[BlindingInputDescriptor] = None
) -> int:
"""
Generate the main contract code and accompanying data,
and store all the info in vertical stage objects
"""
assert start_block_num > 0
lstage = lateral_stage
plan = lstage.plan
lstage_blinding_xkey = safe_derive(
parent_blinding_xkey, STAGE_NEXT_LEVEL_PATH
)
# Need blinding factors and input descriptors ready
# before we can generate the scripts
for vstage in lstage.vertical_stages:
blinding_xkey = safe_derive(
lstage_blinding_xkey, f'{vstage.index_m}h')
blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_FACTOR_PATH)
).digest()
asset_blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_ASSET_FACTOR_PATH)
).digest()
if lstage.level_n == 0 and vstage.index_m == 0:
assert first_stage_input_descriptor is not None
contract_input_descriptor = first_stage_input_descriptor
first_stage_input_descriptor = None
else:
assert first_stage_input_descriptor is None
contract_input_descriptor = BlindingInputDescriptor(
asset=plan.collateral.asset,
amount=plan.collateral.amount,
blinding_factor=Uint256(blinding_factor),
asset_blinding_factor=Uint256(asset_blinding_factor),
)
vstage.blinding_data = VerticalProgressionStageBlindingData(
blinding_xkey, contract_input_descriptor
)
collateral_grab_outs_hash = \
get_hash_of_collateral_forfeiture_checked_outs(
lstage.vertical_stages[-1],
creditor_control_asset, debtor_control_asset, bitcoin_asset)
total_vstages = 0
# Need to process in reverse, because scripts in earlier stages
# depend on scripts in later stages
for vstage in reversed(lstage.vertical_stages):
total_vstages += 1
if vstage.next_lateral_stage:
total_vstages += generate_abl_contract_for_lateral_stage(
vstage.next_lateral_stage,
vstage.blinding_data.blinding_xkey,
start_block_num,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset
)
full_repayment_cod = get_full_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
partial_repayment_cod = get_partial_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
revoc_cod = get_revocation_tx_checked_outs_data(
vstage,
creditor_control_asset,
bitcoin_asset
)
stage_script, checked_outs_hashes = \
generate_script_and_checked_outs_hashes(
vstage,
creditor_control_asset,
debtor_control_asset,
start_block_num,
full_repayment_checked_outs_data=full_repayment_cod,
partial_repayment_checked_outs_data=partial_repayment_cod,
revoc_checked_outs_data=revoc_cod,
hash_of_collateral_grab_outputs_data=collateral_grab_outs_hash,
)
vstage.script_data = VerticalProgressionStageScriptData(
stage_script, checked_outs_hashes
)
return total_vstages | 2c9b47666c3fb5abf78b8a7d007d1258930f1068 | 3,655,923 |
def force_norm():
"""perform normalization simulation"""
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, df, nfreq, [0,0,0], [W, W, 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, decay,
pt=meep.Vector3(0,0,0), decay_by=1e-3))
return {'frequency': np.array(meep.get_flux_freqs(flux_inc)), 'area': (W)**2,
'incident': np.asarray(meep.get_fluxes(flux_inc))} | e5c9e6255568e52d0cb30504cd22f610b6f6e5d9 | 3,655,925 |
def search(coordinates):
"""Search for closest known locations to these coordinates
"""
gd = GeocodeData()
return gd.query(coordinates) | c9191a06b085c61b547136166cb43a24789d95cb | 3,655,926 |
from pathlib import Path
from typing import List
def get_all_apis_router(_type: str, root_path: str) -> List[str]:
    """Return the API file names (or, for _type="schemas", the schema definition file names) found in the swagger folder."""
swagger_path = Path(root_path)
all_files = list(x.name for x in swagger_path.glob("**/*.yaml"))
schemas_files = [x for x in all_files if "schemas" in x]
api_files = [x for x in all_files if "schemas" not in x and "main" not in x]
return api_files if _type == "api" else schemas_files | eab89c870447e3f1abd72529de37d645de3be612 | 3,655,927 |
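A minimal usage sketch for get_all_apis_router, assuming a local "swagger" folder that mixes API specs and schema files; the file names below are hypothetical, chosen only to illustrate the "schemas"/"main" filtering.
from pathlib import Path
root = Path("swagger")
root.mkdir(exist_ok=True)
for name in ("main.yaml", "users_api.yaml", "users_schemas.yaml"):
    (root / name).touch()
print(get_all_apis_router("api", str(root)))      # ['users_api.yaml']
print(get_all_apis_router("schemas", str(root)))  # ['users_schemas.yaml']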
def get_cached_patches(dataset_dir=None):
"""
    Finds the cached patches (stored as images) on disk and returns their paths grouped by sequence
    :param dataset_dir: Path to the dataset folder
    :return: List of patch-path groups, each a list of config.MAX_SEQUENCE_LENGTH consecutive frame paths
"""
if dataset_dir is None:
dataset_dir = config.DATASET_DIR
cache_dir = join(dataset_dir, 'cache')
frame_paths = [join(cache_dir, x) for x in listdir(cache_dir)]
frame_paths = [x for x in frame_paths if is_image(x)]
frame_paths.sort()
tuples = []
for i in range(len(frame_paths) // config.MAX_SEQUENCE_LENGTH):
foo = (frame_paths[i * config.MAX_SEQUENCE_LENGTH + ix] for ix in range(config.MAX_SEQUENCE_LENGTH))
tuples.append(list(foo))
return tuples | 7990b592ddc9b93e04b11c4ae65f410c6afc15d7 | 3,655,928 |
def complex_mse(y_true: tf.Tensor, y_pred: tf.Tensor):
"""
Args:
y_true: The true labels, :math:`V \in \mathbb{C}^{B \\times N}`
        y_pred: The predicted labels, :math:`\\widehat{V} \in \mathbb{C}^{B \\times N}`
Returns:
The complex mean squared error :math:`\\boldsymbol{e} \in \mathbb{R}^B`,
where given example :math:`\\widehat{V}_i \in \mathbb{C}^N`,
we have :math:`e_i = \\frac{\|V_i - \\widehat{V}_i\|^2}{N}`.
"""
real_loss = tf.losses.mse(tf.math.real(y_true), tf.math.real(y_pred))
imag_loss = tf.losses.mse(tf.math.imag(y_true), tf.math.imag(y_pred))
return (real_loss + imag_loss) / 2 | 9dc8699312926b379619e56a29529fe2762d68a9 | 3,655,929 |
def expand_not(tweets):
"""
DESCRIPTION:
In informal speech, which is widely used in social media, it is common to use contractions of words
(e.g., don't instead of do not).
This may result in misinterpreting the meaning of a phrase especially in the case of negations.
    This function expands these contractions and other similar ones (e.g., it's --> it is, etc.).
INPUT:
tweets: Series of a set of tweets as a python strings
OUTPUT:
Series of filtered tweets
"""
tweets = tweets.str.replace('n\'t', ' not', case=False)
tweets = tweets.str.replace('i\'m', 'i am', case=False)
tweets = tweets.str.replace('\'re', ' are', case=False)
tweets = tweets.str.replace('it\'s', 'it is', case=False)
tweets = tweets.str.replace('that\'s', 'that is', case=False)
tweets = tweets.str.replace('\'ll', ' will', case=False)
tweets = tweets.str.replace('\'l', ' will', case=False)
tweets = tweets.str.replace('\'ve', ' have', case=False)
tweets = tweets.str.replace('\'d', ' would', case=False)
tweets = tweets.str.replace('he\'s', 'he is', case=False)
tweets = tweets.str.replace('what\'s', 'what is', case=False)
tweets = tweets.str.replace('who\'s', 'who is', case=False)
tweets = tweets.str.replace('\'s', '', case=False)
for punct in ['!', '?', '.']:
regex = "(\\"+punct+"( *)){2,}"
tweets = tweets.str.replace(regex, punct+' <repeat> ', case=False)
return tweets | 66f4ed5c7321fe7bf5ea0d350980394a235d99e6 | 3,655,930 |
def parse_filter_kw(filter_kw):
"""
Return a parsed filter keyword and boolean indicating if filter is a hashtag
Args:
:filter_kw: (str) filter keyword
Returns:
:is_hashtag: (bool) True, if 'filter_kw' is hashtag
:parsed_kw: (str) parsed 'filter_kw' (lowercase, without '#', ...)
"""
filter_kw = filter_kw.strip()
is_hashtag = filter_kw.startswith('#')
parsed_kw = parse_string(filter_kw, remove=('#', "'")).lower()
return (is_hashtag, parsed_kw) | 253d7d5f1aaf6ab3838e7fb3ba395a919f29b70e | 3,655,931 |
def get_branch_index(BRANCHES, branch_name):
"""
Get the place of the branch name in the array of BRANCHES so will know into which next branch to merge - the next one in array.
"""
    for i, branch in enumerate(BRANCHES):
        if branch_name == branch:
            return i
    return None | c983bab67b3aa0cd1468c39f19732395c7e376f9 | 3,655,932 |
from bs4 import BeautifulSoup
def prettify_save(soup_objects_list, output_file_name):
"""
Saves the results of get_soup() function to a text file.
Parameters:
-----------
soup_object_list:
list of BeautifulSoup objects to be saved to the text file
output_file_name:
entered as string with quotations and with extension .txt , used to name the output text file
This function can work independent of the rest of the library.
Note:
Unique to Windows, open() needs argument: encoding = 'utf8' for it to work.
"""
prettified_soup = [BeautifulSoup.prettify(k) for k in soup_objects_list]
custom_word_added = [m + 'BREAKHERE' for m in prettified_soup]
one_string = "".join(custom_word_added)
# unique to Windows, open() needs argument: encoding = "utf8"
    with open(output_file_name, 'w', encoding='utf8') as file:
file.write(one_string)
return None | 3de5b7df49837c24e89d2ded286c0098069945fd | 3,655,933 |
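A small usage sketch for prettify_save, assuming BeautifulSoup objects built from inline HTML strings stand in for the output of a get_soup() step.
from bs4 import BeautifulSoup
soups = [BeautifulSoup(html, "html.parser")
         for html in ("<p>first page</p>", "<p>second page</p>")]
prettify_save(soups, "soups.txt")
# soups.txt now holds both prettified documents, separated by the 'BREAKHERE' marker.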
def determine_required_bytes_signed_integer(value: int) -> int:
"""
Determines the number of bytes that are required to store value
:param value: a SIGNED integer
:return: 1, 2, 4, or 8
"""
value = ensure_int(value)
if value < 0:
value *= -1
value -= 1
if (value >> 7) == 0:
return 1
if (value >> 15) == 0:
return 2
if (value >> 31) == 0:
return 4
if (value >> 63) == 0:
return 8
raise IntegerLargerThan64BitsException | 231e6f1fc239da5afe7f7600740ace846125e7f5 | 3,655,934 |
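A quick check of the byte-width boundaries, assuming ensure_int and IntegerLargerThan64BitsException from the surrounding module; the values below sit on either side of the signed 8/16/32/64-bit limits.
for value in (127, -128, 128, -129, 32767, 32768, 2**31 - 1, 2**31, -2**63):
    print(value, "->", determine_required_bytes_signed_integer(value), "byte(s)")
# 127 and -128 fit in 1 byte, 128 and -129 need 2, 2**31 - 1 is the last 4-byte value,
# and -2**63 is still representable in 8 bytes.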
import pandas as pd
from bs4 import BeautifulSoup
def scrape_cvs():
"""Scrape and return CVS data."""
page_headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"}
page = get_resource(CVS_ROOT + CVS_VACCINE_PAGE, page_headers)
soup = BeautifulSoup(page.content, 'html.parser')
modals = [elem for elem in soup.find_all(
class_='modal__box') if elem.get('id').startswith('vaccineinfo')]
state_urls = {}
for modal in modals:
state = modal.get('id').split('-')[-1]
state_urls[state] = CVS_ROOT + \
modal.find(class_='covid-status').get('data-url')
state_dfs = []
state_headers = {
'authority': 'www.cvs.com',
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
'accept': '*/*',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.cvs.com/immunizations/covid-19-vaccine',
'accept-language': 'en-US,en;q=0.9',
'referrerPolicy': 'strict-origin-when-cross-origin',
'mode': 'cors',
'credentials': 'include'
}
for state, url in state_urls.items():
print(url)
state_response = get_resource(url, state_headers)
state_df = cvs_json_to_df(state, state_response.json())
state_dfs.append(state_df)
return pd.concat(state_dfs) | 2f2f59b3477297f475d1749ff2a35c2682361cfd | 3,655,935 |
def _to_original(sequence, result):
""" Cast result into the same type
>>> _to_original([], ())
[]
>>> _to_original((), [])
()
"""
if isinstance(sequence, tuple):
return tuple(result)
if isinstance(sequence, list):
return list(result)
return result | 7b9d8d1d2b119d61b43dde253d8d3c48bd0e45b8 | 3,655,936 |
def get_B_R(Rdot):
"""Get B_R from Q, Qdot"""
return Rdot | 696932b9bf423289bdcf91287b0d789007322852 | 3,655,939 |
def run_coroutine_with_span(span, coro, *args, **kwargs):
"""Wrap the execution of a Tornado coroutine func in a tracing span.
This makes the span available through the get_current_span() function.
:param span: The tracing span to expose.
:param coro: Co-routine to execute in the scope of tracing span.
:param args: Positional args to func, if any.
:param kwargs: Keyword args to func, if any.
"""
with span_in_stack_context(span=span):
return coro(*args, **kwargs) | 95672b0a1ecf7b8b86dff09835fa9b3c10f7fad2 | 3,655,940 |
def calc_bin_centre(bin_edges):
"""
Calculates the centre of a histogram bin from the bin edges.
"""
return bin_edges[:-1] + np.diff(bin_edges) / 2 | 780a02dc9372670ae53fb4d85e216458e7d83975 | 3,655,941 |
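A minimal numpy example: np.histogram returns the bin edges, and calc_bin_centre turns those edges into the matching bin centres for plotting.
import numpy as np
samples = np.random.default_rng(0).normal(size=1000)
counts, edges = np.histogram(samples, bins=20)
centres = calc_bin_centre(edges)
assert centres.shape == counts.shape  # one centre per bin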
def to_matrix(dG, tG, d_mat, t_mat, label_mat, bridges):
"""
    Parameters:
        dG: drug graph
        tG: target graph
        d_mat: drug feature matrix
        t_mat: target feature matrix
        label_mat: label matrix
        bridges: known links between drugs and targets
    Return:
        drug_feature, target_feature, new_label
"""
drug_feature, target_feature = {},{}
new_label = set()
for d,t,i in label_mat:
if d in dG.nodes and t in tG.nodes:
#d_vector = np.zeros(d_mat[d].shape)
#t_vector = np.zeros(t_mat[t].shape)
#if i == 1:
d_vector = d_mat[d]
t_vector = t_mat[t]
addressed_d = set()
addressed_t = set()
for link in bridges:
if link[0] in dG.nodes and link[1] in tG.nodes:
if nx.has_path(dG, d, link[0]) and nx.has_path(tG, t, link[1]):
if link[0] not in addressed_d:
#print(f'di: {d}, dl: {link[0]}')
max_sim_d = max_sim(d,link[0],dG)
d_vector = sim_vec(d_vector, d_mat[link[0]],max_sim_d)
addressed_d.add(link[0])
elif link[1] not in addressed_t:
#print(f'tj: {t}, tl: {link[1]}')
max_sim_t = max_sim(t,link[1],tG)
t_vector = sim_vec(t_vector, t_mat[link[1]],max_sim_t)
addressed_t.add(link[1])
drug_feature[d] = d_vector
target_feature[t] = t_vector
new_label.add((d,t,i))
return drug_feature, target_feature, new_label | 54a8ad910f78eca383eba90bd5f6bf6088145630 | 3,655,942 |
def ensureList(obj):
""" ensures that object is list """
if isinstance(obj, list):
        return obj  # returns the original list
    elif hasattr(obj, '__iter__'):  # for python 2.x, check if obj is iterable
return list(obj) # converts to list
else:
return [obj] | f845658fda36a583ac54caed1e6493d331c910fa | 3,655,943 |
import torch
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) | 15ced02b61d6e8c526bc60d2a5214f83183946c2 | 3,655,944 |
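A small sanity check, assuming a recent PyTorch: the tanh-based approximation above should closely track torch.nn.functional.gelu with approximate='tanh' (the approximate keyword exists in PyTorch >= 1.12).
import torch
import torch.nn.functional as F
x = torch.linspace(-4.0, 4.0, steps=9)
print(gelu_impl(x))
print(F.gelu(x, approximate='tanh'))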
def get_shape(kind='line', x=None, y=None, x0=None, y0=None, x1=None, y1=None, span=0, color='red', dash='solid',
width=1,
fillcolor=None, fill=False, opacity=1, xref='x', yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref = 'paper'
x0 = 0
x1 = 1
else:
x0 = x1 = x
else:
x1 = x0
if y1 is None:
if y0 is None:
if y is None:
yref = 'paper'
y0 = 0
y1 = 1
else:
y0 = y1 = y
else:
y1 = y0
shape = {'x0': x0,
'y0': y0,
'x1': x1,
'y1': y1,
'line': {
'color': normalize(color),
'width': width,
'dash': dash
},
'xref': xref,
'yref': yref
}
if kind == 'line':
shape['type'] = 'line'
elif kind == 'circle':
shape['type'] = 'circle'
elif kind == 'rect':
shape['type'] = 'rect'
else:
raise Exception("Invalid or unkown shape type : {0}".format(kind))
if (fill or fillcolor) and kind != 'line':
fillcolor = color if not fillcolor else fillcolor
fillcolor = to_rgba(normalize(fillcolor), opacity)
shape['fillcolor'] = fillcolor
return shape | b639869eca941d2c91d44549aa751c51e033fe00 | 3,655,945 |
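A usage sketch, assuming the normalize/to_rgba colour helpers referenced above are importable from the surrounding module: build a horizontal reference line at y=3 and attach it to a plotly figure.
import plotly.graph_objects as go
hline = get_shape(kind='line', y=3, color='blue', dash='dash', width=2)
fig = go.Figure(data=[go.Scatter(x=[0, 1, 2], y=[1, 4, 2])])
fig.update_layout(shapes=[hline])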
from typing import List
from bs4 import Tag
def clean_row(elements: List[Tag]) -> List[Tag]:
"""
Clean MathML row, removing children that should not be considered tokens or child symbols.
One example of cleaning that should take place here is removing 'd' and 'δ' signs that are
used as derivatives, instead of as identifiers.
"""
# Remove whitespace between elements.
elements = [e for e in elements if not (isinstance(e, str) and e.isspace())]
# Remove quantifiers and double bars.
elements = [e for e in elements if e.text not in ["∀", "∃"]]
elements = [e for e in elements if e.text not in ["|", "∥"]]
# Remove 'd's and 'δ's used as signs for derivatives.
derivatives_cleaned = []
DERIVATIVE_GLYPHS = ["d", "δ", "∂"]
for i, e in enumerate(elements):
is_derivative_symbol = (
# Is the glyph a derivative sign?
e.name == "mi"
and e.text in DERIVATIVE_GLYPHS
# Is the next element a symbol?
and (i < len(elements) - 1 and _is_identifier(elements[i + 1]))
# Is the element after that either not a symbol, or another derivative sign?
and (
i == len(elements) - 2
or not _is_identifier(elements[i + 2])
or elements[i + 2].text in DERIVATIVE_GLYPHS
)
)
if not is_derivative_symbol:
derivatives_cleaned.append(e)
elements = derivatives_cleaned
return elements | 527cb06ddb19d9fb25e5805c49f903254813c4e8 | 3,655,946 |
def models(estimators, cv_search, transform_search):
"""
Grid search prediction workflows. Used by bll6_models, test_models, and product_models.
Args:
estimators: collection of steps, each of which constructs an estimator
cv_search: dictionary of arguments to LeadCrossValidate to search over
transform_search: dictionary of arguments to LeadTransform to search over
    Returns: a list of drain.model.Predict steps constructed by taking the product of
        the estimators with the result of drain.util.dict_product on each of
cv_search and transform_search.
Each Predict step contains the following in its inputs graph:
- lead.model.cv.LeadCrossValidate
- lead.model.transform.LeadTransform
- drain.model.Fit
"""
steps = []
for cv_args, transform_args, estimator in product(
dict_product(cv_search), dict_product(transform_search), estimators):
cv = lead.model.cv.LeadCrossValidate(**cv_args)
cv.name = 'cv'
X_train = Call('__getitem__', inputs=[MapResults([cv], {'X':'obj', 'train':'key',
'test':None, 'aux':None})])
mean = Call('mean', inputs=[X_train])
mean.name = 'mean'
X_impute = Construct(data.impute,
inputs=[MapResults([cv], {'aux':None, 'test':None, 'train':None}),
MapResults([mean], 'value')])
cv_imputed = MapResults([X_impute, cv], ['X', {'X':None}])
cv_imputed.target = True
transform = lead.model.transform.LeadTransform(inputs=[cv_imputed], **transform_args)
transform.name = 'transform'
fit = model.Fit(inputs=[estimator, transform], return_estimator=True)
fit.name = 'fit'
y = model.Predict(inputs=[fit, transform],
return_feature_importances=True)
y.name = 'predict'
y.target = True
steps.append(y)
return steps | 2a3044a9cc994f18e37337a7e58e9fb9e5ef05d1 | 3,655,947 |
from datetime import datetime
import pytz
def xml_timestamp(location='Europe/Prague'):
"""Method creates timestamp including time zone
Args:
location (str): time zone location
Returns:
str: timestamp
"""
    return datetime.now(pytz.timezone(location)).isoformat() | a2883e269c8f9ae8ffd723b7b0205d931453e358 | 3,655,948 |
def transform_postorder(comp, func):
"""Traverses `comp` recursively postorder and replaces its constituents.
For each element of `comp` viewed as an expression tree, the transformation
`func` is applied first to building blocks it is parameterized by, then the
element itself. The transformation `func` should act as an identity function
on the kinds of elements (computation building blocks) it does not care to
transform. This corresponds to a post-order traversal of the expression tree,
    i.e., parameters are always transformed left-to-right (in the order in which
they are listed in building block constructors), then the parent is visited
and transformed with the already-visited, and possibly transformed arguments
in place.
NOTE: In particular, in `Call(f,x)`, both `f` and `x` are arguments to `Call`.
Therefore, `f` is transformed into `f'`, next `x` into `x'` and finally,
`Call(f',x')` is transformed at the end.
Args:
comp: The computation to traverse and transform bottom-up.
func: The transformation to apply locally to each building block in `comp`.
It is a Python function that accepts a building block at input, and should
return either the same, or transformed building block at output. Both the
        input and output of `func` are instances of `ComputationBuildingBlock`.
Returns:
The result of applying `func` to parts of `comp` in a bottom-up fashion.
Raises:
TypeError: If the arguments are of the wrong computation_types.
NotImplementedError: If the argument is a kind of computation building block
that is currently not recognized.
"""
py_typecheck.check_type(comp,
computation_building_blocks.ComputationBuildingBlock)
if isinstance(
comp,
(computation_building_blocks.CompiledComputation,
computation_building_blocks.Data, computation_building_blocks.Intrinsic,
computation_building_blocks.Placement,
computation_building_blocks.Reference)):
return func(comp)
elif isinstance(comp, computation_building_blocks.Selection):
return func(
computation_building_blocks.Selection(
transform_postorder(comp.source, func), comp.name, comp.index))
elif isinstance(comp, computation_building_blocks.Tuple):
return func(
computation_building_blocks.Tuple([(k, transform_postorder(
v, func)) for k, v in anonymous_tuple.to_elements(comp)]))
elif isinstance(comp, computation_building_blocks.Call):
transformed_func = transform_postorder(comp.function, func)
if comp.argument is not None:
transformed_arg = transform_postorder(comp.argument, func)
else:
transformed_arg = None
return func(
computation_building_blocks.Call(transformed_func, transformed_arg))
elif isinstance(comp, computation_building_blocks.Lambda):
transformed_result = transform_postorder(comp.result, func)
return func(
computation_building_blocks.Lambda(
comp.parameter_name, comp.parameter_type, transformed_result))
elif isinstance(comp, computation_building_blocks.Block):
return func(
computation_building_blocks.Block(
[(k, transform_postorder(v, func)) for k, v in comp.locals],
transform_postorder(comp.result, func)))
else:
raise NotImplementedError(
'Unrecognized computation building block: {}'.format(str(comp))) | 964e55dc33acf978cae3f058397c9b355cae9af7 | 3,655,949 |
def bytes_to_unicode_records(byte_string, delimiter, encoding):
""" Convert a byte string to a tuple containing an array of unicode
records and any remainder to be used as a prefix next time. """
string = byte_string.decode(encoding)
records = string.split(delimiter)
return (records[:-1], records[-1].encode(encoding)) | ccc3591551a6b316843cc8eafb33e45627eac752 | 3,655,950 |
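A worked example of the remainder handling in bytes_to_unicode_records: feed the stream in two chunks and carry the unfinished record forward as the prefix of the next chunk.
chunk1 = "alpha\nbeta\nga".encode("utf-8")
chunk2 = "mma\n".encode("utf-8")
records, remainder = bytes_to_unicode_records(chunk1, "\n", "utf-8")
print(records, remainder)   # ['alpha', 'beta'] b'ga'
records, remainder = bytes_to_unicode_records(remainder + chunk2, "\n", "utf-8")
print(records, remainder)   # ['gamma'] b''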
def administrator():
"""Returns a :class:`t_system.administration.Administrator` instance."""
return Administrator() | e473ee2e86f66f96a5cf3e09ac4a052e32a279b9 | 3,655,951 |
import numpy
import cv2
import pytesseract
from PIL import Image
def ocr(path, lang='eng'):
"""Optical Character Recognition function.
Parameters
----------
path : str
Image path.
lang : str, optional
Decoding language. Default english.
    Returns
    -------
    str
        The first five characters of the recognized text.
    """
image = Image.open(path)
vectorized_image = numpy.asarray(image).astype(numpy.uint8)
vectorized_image[:, :, 0] = 0
vectorized_image[:, :, 2] = 0
im = cv2.cvtColor(vectorized_image, cv2.COLOR_RGB2GRAY)
return pytesseract.image_to_string(
Image.fromarray(im),
lang=lang
)[:5] | 9b484779a34d65bb25e57baeaa371205c65d2dc6 | 3,655,952 |
def is_solution(system, point):
"""
Checks whether the point is the solution for a given constraints system.
"""
a = np.array(system)
# get the left part
left = a[:, :-1] * point
left = sum(left.T)
# get the right part
right = (-1) * a[:, -1]
return np.all(left <= right) | 774987f22a57f3a6d68b5d51f7b3a42d945a1eff | 3,655,956 |
def git_config_bool(option: str) -> bool:
"""
Return a boolean git config value, defaulting to False.
"""
return git_config(option) == "true" | 1ed48faa3c6de43fc8a732aed2fde1a81bc75949 | 3,655,957 |
def read_configs(paths):
"""
    Read YAML files and return a merged dict of network interface definitions.
"""
eths = dict()
vlans = dict()
bonds = dict()
for path in paths:
cfg = read_config(path)
ifaces = cfg.get("network", dict())
if "ethernets" in ifaces:
eths.update(ifaces["ethernets"])
if "vlans" in ifaces:
vlans.update(ifaces["vlans"])
if "bonds" in ifaces:
bonds.update(ifaces["bonds"])
return dict(
ethernets=eths,
vlans=vlans,
bonds=bonds
) | 998c75b9d75e4d6404c265a67c31bb88b9b7d435 | 3,655,958 |
import json
from googleapiclient import discovery
def get_client():
""" generates API client with personalized API key """
with open("api_key.json") as json_file:
apikey_data = json.load(json_file)
api_key = apikey_data['perspective_key']
# Generates API client object dynamically based on service name and version.
perspective = discovery.build('commentanalyzer', 'v1alpha1',
developerKey=api_key)
dlp = discovery.build('dlp', 'v2', developerKey=api_key)
return (apikey_data, perspective, dlp) | be68eeeedf9c3dcf3f3991b70db18cd3032d2218 | 3,655,959 |
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
settings['route_patterns'] = {
'villages': '/geography.cfm',
'parameters': '/thesaurus.cfm',
'sources': '/bibliography.cfm',
'languages': '/languages.cfm',
'florafauna': '/florafauna.cfm',
'bangime': '/bangime.cfm',
'file': r'/_files/{id:[^/\.]+}',
'file_alt': r'/_files/{id:[^/\.]+}.{ext}',
}
config = Configurator(settings=settings)
config.include('clldmpg')
config.register_menu(
('dataset', partial(menu_item, 'dataset', label='Home')),
('languages', partial(menu_item, 'languages')),
('values', partial(menu_item, 'values', label='Lexicon')),
('parameters', partial(menu_item, 'parameters', label='Thesaurus')),
('villages', partial(menu_item, 'villages', label='Villages')),
('florafauna', partial(menu_item, 'florafauna', label='Flora-Fauna')),
#('contributors', partial(menu_item, 'contributors', label='Project members')),
('sources', partial(menu_item, 'sources', label='Materials')),
#('bangime', partial(menu_item, 'bangime', label='Bangime')),
#('other', partial(menu_item, 'other', label='Other Languages')),
('movies', partial(menu_item, 'movies', label='Videos')),
)
home_comp = config.registry.settings['home_comp']
home_comp = [
'bangime', 'other',
'contributors'] + home_comp
config.add_settings({'home_comp': home_comp})
config.register_resource('village', models.Village, IVillage, with_index=True)
config.register_resource('movie', models.Movie, IMovie, with_index=True)
config.register_resource('file', models.File, IFile, with_index=True)
config.registry.registerUtility(CustomFactoryQuery(), ICtxFactoryQuery)
config.add_page('bangime')
config.add_page('florafauna')
config.add_page('other')
config.add_page('typology')
return config.make_wsgi_app() | 52779856e4eeecb9673707b707d51322decda729 | 3,655,960 |
def overrides(pattern, norminput):
"""Split a date subfield into beginning date and ending date. Needed for fields with
multiple hyphens.
Args:
pattern: date pattern
norminput: normalized date string
Returns:
start date portion of pattern
start date portion of norminput
end date portion of pattern
end date portion of norminput
"""
if pattern == 'NNNN-NNNN-':
return pattern[:4], pattern[5:9], norminput[:4], norminput[5:9]
if pattern == 'NNNN?-NNNN? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN---NNNN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--':
return pattern[:4], None, norminput[:4], None
if pattern == 'NNNN-NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'f. NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NNNN?-NNNN av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN-NN-NNNN':
return pattern, None, norminput, None
if pattern == '-NNNN-':
return None, pattern[:-1], None, norminput[:-1]
if pattern == 'NNNN--NNNN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-NN--?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNNNNNN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNNN-NNN-':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN av. j.-c.-NNNN':
return pattern[:-5], pattern[-4:], norminput[:-5], norminput[-4:]
if pattern == 'NNNN-NN-NN-':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NN-- -NNNN':
return pattern[:4], pattern[-4:], norminput[:4], norminput[-4:]
if pattern == 'NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN? av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--...':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'fl. NNN--NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'fl. NN---NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN---NNNN?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNN--NNN-':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN..-NN.. av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN--':
return pattern, None, norminput, None
if pattern == 'fl. NN--':
return pattern, None, norminput, None
if pattern == 'NN..?-NN..? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNN-NNN av. j.-c.':
return pattern[:3], pattern[4:], norminput[:3], norminput[4:]
if pattern == 'NN---NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNN--NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN-..-NN..':
return pattern[:2]+pattern[3:5], pattern[6:], norminput[:2]+norminput[3:5], norminput[6:]
if pattern == 'NN---':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NNNN?-NNNN?':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNNN-NN-NN-NNNN-NN-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN':
return pattern, None, norminput, None
if pattern == 'NNNN-N-N':
return pattern, None, norminput, None
if pattern == 'NNNN-NNNN-NN-NN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-N-NN-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-N-NNNN-N-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN-N-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'month NN NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NN month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-NNNN-N-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == '-NNNN-NN-NN':
return None, pattern[1:], None, norminput[1:]
if pattern == 'NNNN-NN-NN-month NN NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-NNNN-N-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NN month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-NN-NNNN-NN-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'month N NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-month NN NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-month N NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-N month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'N month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-NN-NN-NNNN-NN-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NNNN/NN/NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN}}':
return pattern, None, norminput, None
if pattern == 'NN-NN-NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-month N NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NNNN-N-NN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-N-NN-month NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'c. NNNN-NNNN-NN-NN':
return pattern[:7], pattern[8:], norminput[:7], norminput[8:]
    if pattern == 'NNNN-N-N-NNNN':
        return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
    return None | 5e005b0537d123607225ed82163cac07f578a755 | 3,655,961 |
def defaultPolynomialLoad():
"""
pytest fixture that returns a default PolynomialLoad object
:return: PolynomialLoad object initialized with default values
"""
return PolynomialStaticLoad() | 75b989dc80e7ccf4e9a091de2dcdeb8758b465b3 | 3,655,962 |
import multiprocessing as mp
def calc_pi(iteration_count, cores_usage):
"""
We calculate pi using Ulam's Monte Carlo method. See the module
documentation. The calculated value of pi is returned.
We use a process pool to offer the option of spreading the
calculation across more then one core.
iteration_count is the number of iterations that are run.
cores_usage is the number of processes to use.
"""
# We're using a multiprocessing pool here, to take advantage of
# multi-core CPUs.
# Calculate stuff for the pool.
pool_size = cores_usage
iterations_per_process = iteration_count // pool_size
work_list = [iterations_per_process] * pool_size
work_list[0] += iteration_count % pool_size
# Set up the pool.
calc_pool = mp.Pool(pool_size)
# Use the pool to obtain random points in the unit circle.
# We'll let the system determine the chunk size.
in_circle_total = sum(calc_pool.map(
count_is_in_cirle,
work_list))
# Finish the calculation. in_circle_total, divided by the total
# number of iterations, is the area of the unit circle
# relative to the [-1, 1] square. Multiply by 4, which is the area
# of the [-1, 1] square, to get the area of the unit circle.
# .NOTE. If you modify this program to run in Python 2.7, remember
# to modify this calculation to use floating point division (or
# import division from future).
return 4 * in_circle_total / iteration_count | 7b4db8f0936995f46a42fedb4d5539cd3057eb01 | 3,655,963 |
def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
Returns a list of half-cycle-amplitudes
x: Peak-Trough sequence (integer list of local minima and maxima)
This routine is implemented according to
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
except that a list of half-cycle-amplitudes are returned instead of a from_level-to_level-matrix
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
S = np.zeros(n + 1)
A = np.zeros((k + 1, k + 1))
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
#print S[p - 3:p + 1]
#print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
#print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
#print (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or \
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
A[S[p - 2], S[p - 1]] += 1
A[S[p - 1], S[p - 2]] += 1
S[p - 2] = S[p]
p -= 2
else:
break
if f == 1:
break # q==n
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
#print S[q], "to", S[q + 1]
A[S[q], S[q + 1]] += 1
return A | 96d86079b971bda58fd2d0af440feecc8fa4c1fd | 3,655,965 |
def serialize_action(
action: RetroReaction, molecule_store: MoleculeSerializer
) -> StrDict:
"""
Serialize a retrosynthesis action
:param action: the (re)action to serialize
:param molecule_store: the molecule serialization object
:return: the action as a dictionary
"""
dict_ = action.to_dict()
dict_["mol"] = molecule_store[dict_["mol"]]
dict_["class"] = f"{action.__class__.__module__}.{action.__class__.__name__}"
return dict_ | f35c0a34cc6778a39c991edafdda6bd30aea4886 | 3,655,966 |
import string
def complement(s):
"""
Return complement of 's'.
"""
c = string.translate(s, __complementTranslation)
return c | 7dab43db51bc5a3bb7321deebdb8122792f08d86 | 3,655,968 |
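string.translate above is the Python 2 API; a minimal Python 3 sketch of the same idea, assuming a DNA alphabet for the (externally defined) translation table:
_complement_table = str.maketrans("ACGTacgt", "TGCAtgca")
def complement_py3(s):
    """Return the complement of DNA string 's'."""
    return s.translate(_complement_table)
print(complement_py3("ATGCCA"))  # TACGGT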
import copy
def get_state_transitions(actions):
"""
    get the next state for each action
    @param actions: iterable of (current_state, (id, next_path)) pairs
    @return: list of tuples (current_state, action, next_state)
"""
state_transition_pairs = []
for action in actions:
current_state = action[0]
id = action[1][0]
next_path = action[1][1]
next_state = copy.deepcopy(current_state)
if 'NoTrans' not in id:
# change the state
next_state[id] = next_path
state_transition_pairs.append((current_state, action[1], next_state))
return state_transition_pairs | bbed37ed6469f5635fbc65fa07195114b4bb3dac | 3,655,969 |
import struct
def parse_pascal_string(characterset, data):
"""
Read a Pascal string from a byte array using the given character set.
:param characterset: Character set to use to decode the string
:param data: binary data
:return: tuple containing string and number of bytes consumed
"""
string_size_format, string_size_size, character_size = get_string_size_format(characterset)
if len(data) < string_size_size:
raise FileParseException("String size truncated")
string_size = struct.unpack("<" + string_size_format, data[0:string_size_size])[0] * character_size
string_data = data[string_size_size:string_size_size + string_size]
result = string_data.decode(CHARACTER_SETS[characterset])
total_size = string_size_size + string_size
return result, total_size | eabdfe1f6fb864eead1345016495f64c5457727e | 3,655,970 |
def folder(initial=None, title='Select Folder'):
"""Request to select an existing folder or to create a new folder.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
Returns
-------
:class:`str`
The name of the selected folder or :obj:`None` if the user cancelled
the request to select a folder.
"""
app, title = _get_app_and_title(title)
name = QtWidgets.QFileDialog.getExistingDirectory(app.activeWindow(), title, initial)
return name if len(name) > 0 else None | 60331e1a89241595e09e746901fff656f8d4365a | 3,655,971 |
from tqdm import tqdm
from typing import Any
from typing import Optional
def tqdm_hook(t: tqdm) -> Any:
"""Progressbar to visualisation downloading progress."""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, t_size: Optional[int] = None) -> None:
if t_size is not None:
t.total = t_size
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to | ff075a946ea9cf2d124d9d5e93fb83d31f2e0623 | 3,655,973 |
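Typical wiring for tqdm_hook, assuming urllib's reporthook protocol (block number, block size, total size), which matches the update_to signature above; the URL and filename are placeholders.
from urllib.request import urlretrieve
from tqdm import tqdm
url = "https://example.com/archive.zip"
with tqdm(unit="B", unit_scale=True, unit_divisor=1024, miniters=1, desc="download") as t:
    urlretrieve(url, filename="archive.zip", reporthook=tqdm_hook(t))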
def check_regular_timestamps(
time_series: TimeSeries, time_tolerance_decimals: int = 9, gb_severity_threshold: float = 1.0
):
"""If the TimeSeries uses timestamps, check if they are regular (i.e., they have a constant rate)."""
if (
time_series.timestamps is not None
and len(time_series.timestamps) > 2
and check_regular_series(series=time_series.timestamps, tolerance_decimals=time_tolerance_decimals)
):
timestamps = np.array(time_series.timestamps)
if timestamps.size * timestamps.dtype.itemsize > gb_severity_threshold * 1e9:
severity = Severity.HIGH
else:
severity = Severity.LOW
return InspectorMessage(
severity=severity,
message=(
"TimeSeries appears to have a constant sampling rate. "
f"Consider specifying starting_time={time_series.timestamps[0]} "
f"and rate={time_series.timestamps[1] - time_series.timestamps[0]} instead of timestamps."
),
) | 0c44f2b26a71e76b658180e1817cc3dfbeb375e0 | 3,655,974 |
def test_device_bypass(monkeypatch):
"""Test setting the bypass status of a device."""
_was_called = False
def _call_bypass(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BYPASS", "device_id": [6023], "options": {"toggle": "OFF"}}
_was_called = True
return StubResponse(None, 204)
api = call_cbcloud_api()
patch_cbc_sdk_api(monkeypatch, api, POST=_call_bypass)
api.device_bypass([6023], False)
assert _was_called | 17e2a2f1f7c8ef1a7ef32e8aacb40f8f7fe16c53 | 3,655,975 |
import re
import importlib
def import_config_module( cfg_file ):
""" Returns valid imported config module.
"""
cfg_file = re.sub( r'\.py$', '', cfg_file )
cfg_file = re.sub( r'-', '_', cfg_file )
mod_name = 'config.' + cfg_file
cfg_mod = importlib.import_module( mod_name )
if not hasattr( cfg_mod, 'pre_start_config' ):
raise ImportError( 'Config file must define \'pre_start_config\' method' )
if not hasattr( cfg_mod, 'post_start_config' ):
raise ImportError( 'Config file must define \'post_start_config\' method' )
return cfg_mod | 4cb25a56df0f26f0f3c4917aad2ca4cd40e4797f | 3,655,976 |
import multiprocessing
def process_batches(args, batches):
"""Runs a set of batches, and merges the resulting output files if more
than one batch is included.
"""
nbatches = min(args.nbatches, len(batches))
pool = multiprocessing.Pool(nbatches, init_worker_thread)
try:
batches = pool.imap(run_batch, batches, 1)
if not merge_batch_results(batches):
pool.terminate()
pool.join()
return 1
pool.close()
pool.join()
return 0
except:
pool.terminate()
pool.join()
raise | dbd893773e6a5fed1d68a48c875741e4ce963ae6 | 3,655,977 |
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant
(16 or 24 byte long key) and PKCS#5 padding.
:param key:
The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
ctx = triple_des(key, mode=DES_CBC, IV=iv, padmode=DES_PAD_PKCS5)
return ctx.decrypt(data) | bf6d7efaade2cb7ce2f6abf7cea89a04fdbb3897 | 3,655,978 |
def kruskal_chi2_test(data=None, alpha=0.05, precision=4):
"""
col = 要比較的 target
row = data for each target
"""
if type(data) == pd.DataFrame:
data = data.copy().to_numpy()
alldata = np.concatenate(data.copy())
else:
alldata = np.concatenate(data.copy())
k = data.shape[1]
alldata.sort()
tmp_df = pd.DataFrame(({'value': alldata}))
tmp_df['rank'] = tmp_df.index + 1 # rank
value_to_rank = tmp_df.groupby('value').mean().reset_index()
T = []
sample_rank_df = []
for i in range(k):
samp = pd.DataFrame(
{'value': data[:, i][~np.isnan(data[:, i])]})
samp = pd.merge(samp, value_to_rank)
sample_rank_df.append(samp)
T.append(samp['rank'].sum())
n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]
# print(T)
# print(n)
rule_of_five_str = ""
if (np.sum(np.array(n) < 5) > 0):
rule_of_five_str += "!(At least one sample size is less than 5)"
else:
rule_of_five_str += "(All sample size >= 5)"
N = np.sum(n)
t_over_n = 0
for i in range(k):
t_over_n += T[i] ** 2 / n[i]
H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)
p_value = 1 - stats.chi2.cdf(H, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'H': H, 'p-value': p_value,
'T': T, 'sample_rank_df': sample_rank_df}
flag = p_value < alpha
result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======
{rule_of_five_str}
H statistic value (observed) = {H:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict | 3b89e14e7072cbb6375b0c1ead8320c5643aacd1 | 3,655,979 |
def add_new_action(action, object_types, preferred, analyst):
"""
Add a new action to CRITs.
:param action: The action to add to CRITs.
:type action: str
:param object_types: The TLOs this is for.
:type object_types: list
:param preferred: The TLOs this is preferred for.
:type preferred: list
:param analyst: The user adding this action.
:returns: True, False
"""
action = action.strip()
idb_action = Action.objects(name=action).first()
if not idb_action:
idb_action = Action()
idb_action.name = action
idb_action.object_types = object_types
idb_action.preferred = []
prefs = preferred.split('\n')
for pref in prefs:
cols = pref.split(',')
if len(cols) != 3:
continue
epa = EmbeddedPreferredAction()
epa.object_type = cols[0].strip()
epa.object_field = cols[1].strip()
epa.object_value = cols[2].strip()
idb_action.preferred.append(epa)
try:
idb_action.save(username=analyst)
except ValidationError:
return False
return True | 2b54c3766d9793a1c6598402bf7a5b1103bb324b | 3,655,980 |
import multiprocessing
import asyncio
def test_PipeJsonRpcSendAsync_5():
"""
    Special test case.
    Two messages: the first message times out, the second message is sent before the response
    from the first message is received. Verify that the result returned in response to the
    second message is received. (We discard the result of the message that timed out.)
"""
def method_handler1():
ttime.sleep(0.7)
return 39
def method_handler2():
ttime.sleep(0.2)
return 56
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.start()
async def send_messages():
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
# Submit multiple messages at once. Messages should stay at the event loop
# and be processed one by one.
with pytest.raises(CommTimeoutError):
await p_send.send_msg("method1", timeout=0.5)
result = await p_send.send_msg("method2", timeout=0.5)
assert result == 56, "Incorrect result received"
p_send.stop()
asyncio.run(send_messages())
pc.stop() | a1939e03f4c9992d84ac52a35b975f20077a2161 | 3,655,981 |
import re
def tpc(fastas, **kw):
"""
Function to generate tpc encoding for protein sequences
:param fastas:
:param kw:
:return:
"""
AA = kw['order'] if kw['order'] is not None else 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA]
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = [name]
tmpCode = [0] * 8000
for j in range(len(sequence) - 3 + 1):
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] = \
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings | b8017356980b266d78d85a867aee97c0d79ec5e5 | 3,655,983 |
def _uninstall_flocker_centos7():
"""
Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
machine.
"""
return sequence([
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-python-flocker",
]),
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-release",
]),
]) | 0d8f068857cbc25743b644d067fe70efffb644f0 | 3,655,984 |
import requests
def authenticate(username, password):
"""Authenticate with the API and get a token."""
API_AUTH = "https://api2.xlink.cn/v2/user_auth"
auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,
'password': password}
r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
try:
return (r.json()['access_token'], r.json()['user_id'])
except KeyError:
raise(LaurelException('API authentication failed')) | 9675227b5ff4f58d79bafffc0407366a26d638bd | 3,655,986 |
def filter_hashtags_users(DATAPATH, th, city):
"""
cleans target_hashtags by removing hashtags that are used by less than 2 users
replaces hahstags by ht_id and saves to idhashtags.csv
creates entropy for each ht_id and saves to hashtag_id_entropies.csv
prints std output
:param DATAPATH:
:param th: hashtags are too popular if more than th% of users share them
:param city:
:return:
"""
ht = pd.read_csv(DATAPATH + city + ".target_hashtags")
print ("ht.shape", ht.shape)
ht["hashtags"] = ht['hashtags'].astype('category')
ht["ht_id"] = ht["hashtags"].cat.codes
ht.drop('hashtags', axis=1, inplace=True)
#arrmult = []
entarr = []
gp = ht.groupby('ht_id')
# cnt_df = gp.size().reset_index(name='sizes')
# hashtags are too popular if more than th% of users share them
max_df_ht = th * len(ht.uid.unique())
print ("max_df_ht", max_df_ht)
# removing hashtags that are used by less than 2 users and more than th% of users
for htid, group in gp:
user_count = len(group['uid'].value_counts().values)
if user_count > 1 and user_count <= max_df_ht:
e = entropy(group['uid'].value_counts().values)
c = len(group)
entarr.append([htid, e, c])
#arrmult.append(htid)
# save entropies of hashtags for other calculations
entdf = pd.DataFrame(data=entarr, columns=['ht_id', 'entropy', 'counts'])
sortt = entdf.sort_values(by='entropy')
sortt.to_csv(DATAPATH + "counts_entropies.csv", index=False)
# filtered hashtag df
ht2 = ht[ht.ht_id.isin(entdf.ht_id)]
print ("after removing too popular and too rare hts", ht2.shape)
ht2.to_csv(DATAPATH + str(th) + "filtered_hashtags.csv", index=False)
return entdf, ht2 | 60e0b02f9bbdccae32958717fd8608aa1932386e | 3,655,987 |
def cluster_set_state(connection: 'Connection', state: int, query_id=None) -> 'APIResult':
"""
Set cluster state.
:param connection: Connection to use,
:param state: State to set,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
return __cluster_set_state(connection, state, query_id) | 88b05a617e17574961e44d5a88bec1ac4da0be95 | 3,655,988 |
def get_all(data, path):
"""Returns a list with all values in data matching the given JsonPath."""
return [x for x in iterate(data, path)] | 592fee87b3b4be171d4e4a19b013b99551768f75 | 3,655,989 |
def extract_information_from_blomap(oneLetterCodes):
"""
extracts isoelectric point (iep) and
hydrophobicity from blomap for each aminoacid
Parameters
----------
oneLetterCodes : list of Strings/Chars
contains oneLetterCode for each aminoacid
Returns
-------
    list, list
        isoelectric_point, hydrophobicity (one value per aminoacid, each wrapped in a list)
"""
letter_encodings = []
for x in oneLetterCodes:
letter_encodings.append(extended_blomap[x.upper()])
isoelectric_point = []
hydrophobicity = []
for element in letter_encodings:
isoelectric_point.append([element[7]])
hydrophobicity.append([element[8]])
return isoelectric_point, hydrophobicity | d36e16a0e35d744f1001752c98d09035b3e581c6 | 3,655,990 |
def partitions(n):
"""
Return a sequence of lists
Each element is a list of integers which sum to n -
a partition n.
The elements of each partition are in descending order
and the sequence of partitions is in descending lex order.
>>> list(partitions(4))
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
return partitions_with_max(n, max=n - 1) | 042759c97031baee7c958d59b3a432b52111a696 | 3,655,991 |
def create_request_element(channel_id, file_info, data_id, annotation):
"""
create dataset item from datalake file
:param channel_id:
    :param file_info:
    :param data_id:
    :param annotation:
:return:
"""
data_uri = 'datalake://{}/{}'.format(channel_id, file_info.file_id)
data = {
'source_data': [
{
'data_uri': data_uri,
'data_type': file_info.content_type
}
],
'attributes': {
'classification': annotation,
'id': data_id
}
}
return data | 9fad37428e2608d47b2d0d57d075c0fbd9292b46 | 3,655,992 |
from typing import Mapping
from typing import Iterable
def _categorise(obj, _regex_adapter=RegexAdapter):
"""
Check type of the object
"""
if obj is Absent:
return Category.ABSENT
obj_t = type(obj)
if issubclass(obj_t, NATIVE_TYPES):
return Category.VALUE
elif callable(obj):
return Category.CALLABLE
elif _regex_adapter.check(obj):
return Category.REGEX
elif issubclass(obj_t, Mapping):
return Category.DICT
elif issubclass(obj_t, Iterable):
return Category.ITERABLE
else: # catch-all for types like decimal.Decimal, uuid.UUID, et cetera
return Category.VALUE | 549f21bee43f619fea7c2a09940cda1ce03e4e8c | 3,655,993 |
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r[key]
del r[key]
return v, r | 5695b18675b52f4ca8bc3cba1ed0104425e7a04f | 3,655,994 |
import csv
import six
def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util,
output_file_param_util):
"""Parses task parameters from a TSV.
Args:
    tasks: Dict containing the path to a TSV file and the task numbers to run.
      The file's first line lists variables, input, and output parameters as
      column headings; subsequent lines specify parameter values, one row per job.
retries: Number of retries allowed.
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
task_descriptors: an array of records, each containing the task-id,
task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of
parameters for each task of the job.
Raises:
ValueError: If no job records were provided
"""
task_descriptors = []
path = tasks['path']
task_min = tasks.get('min')
task_max = tasks.get('max')
# Load the file and set up a Reader that tokenizes the fields
param_file = dsub_util.load_file(path)
reader = csv.reader(param_file, delimiter='\t')
# Read the first line and extract the parameters
header = six.advance_iterator(reader)
job_params = parse_tasks_file_header(header, input_file_param_util,
output_file_param_util)
# Build a list of records from the parsed input file
for row in reader:
# Tasks are numbered starting at 1 and since the first line of the TSV
# file is a header, the first task appears on line 2.
task_id = reader.line_num - 1
if task_min and task_id < task_min:
continue
if task_max and task_id > task_max:
continue
if len(row) != len(job_params):
dsub_util.print_error('Unexpected number of fields %s vs %s: line %s' %
(len(row), len(job_params), reader.line_num))
# Each row can contain "envs", "inputs", "outputs"
envs = set()
inputs = set()
outputs = set()
labels = set()
for i in range(0, len(job_params)):
param = job_params[i]
name = param.name
if isinstance(param, job_model.EnvParam):
envs.add(job_model.EnvParam(name, row[i]))
elif isinstance(param, job_model.LabelParam):
labels.add(job_model.LabelParam(name, row[i]))
elif isinstance(param, job_model.InputFileParam):
inputs.add(
input_file_param_util.make_param(name, row[i], param.recursive))
elif isinstance(param, job_model.OutputFileParam):
outputs.add(
output_file_param_util.make_param(name, row[i], param.recursive))
task_descriptors.append(
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': 1 if retries else None
}, {
'labels': labels,
'envs': envs,
'inputs': inputs,
'outputs': outputs
}, job_model.Resources()))
# Ensure that there are jobs to execute (and not just a header)
if not task_descriptors:
raise ValueError('No tasks added from %s' % path)
return task_descriptors | 7c195e8c09b439d39fca105fa3303f74c43538c1 | 3,655,995 |
def spreadplayers(self: Client, x: RelativeFloat, y: RelativeFloat,
spread_distance: float, max_range: float,
victim: str) -> str:
"""Spreads players."""
return self.run('spreadplayers', x, y, spread_distance, max_range, victim) | 6577d7209d19a142ae9e02804b84af921df3224c | 3,655,997 |
def get_version():
"""Returns single integer number with the serialization version"""
return 2 | f25ad858441fcbb3b5353202a53f6ebaa8874e4d | 3,655,998 |
from functools import wraps
def format_result(func):
    """Wrap the return value in a standard response format for the caller."""
@wraps(func)
def wrapper(*args, **kwargs):
ret = {}
try:
data = func(*args, **kwargs)
if type(data) is Response:
return data
ret['data'] = data
ret['success'] = True
ret['message'] = 'Succeed'
except Exception as e:
ret['message'] = str(e)
ret['data'] = None
ret['success'] = False
logger.info(f"request_{func}, result: {ret}")
return ret
return wrapper | 53109217a9fe6fbc00250a7b8dfd6b295e47e12b | 3,655,999 |
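A hypothetical usage of format_result; the handler name and payload are made up, and Response/logger are assumed to exist in the surrounding Flask module:
@format_result
def get_user(user_id):
    return {"id": user_id, "name": "alice"}
result = get_user(1)
# result == {'data': {'id': 1, 'name': 'alice'}, 'success': True, 'message': 'Succeed'}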
import tifffile as tiff  # assumed provider of the tiff.imsave calls below
def writeData(filename, data):
"""
MBARBIER: Taken/adapted from https://github.com/ChristophKirst/ClearMap/blob/master/ClearMap/IO/TIF.py
Write image data to tif file
Arguments:
filename (str): file name
data (array): image data
Returns:
str: tif file name
"""
    d = len(data.shape)
    if d == 2:
        tiff.imsave(filename, data.transpose([0,1]))
    elif d == 3:
        tiff.imsave(filename, data.transpose([2,0,1]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True)
    elif d == 4:
        #tiffile (z,y,x,c)
        tiff.imsave(filename, data.transpose([0,1,2,3]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True)
    else:
        raise RuntimeError('writing multiple channel data to tif not supported!')
    return filename | cc4414b9f52413bebc422032f796cd242ecc8ef4 | 3,656,000
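Example call for writeData; the array shape is illustrative and follows the (x, y, z) convention implied by the transposes above:
import numpy as np
stack = np.zeros((512, 512, 30), dtype='uint16')
writeData('test_stack.tif', stack)   # 3D stack written via the bigtiff branch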
def get_trigger_function(trigger_message, waiter):
"""Función auxiliar que genera un activador
Args:
trigger_message: mensaje o instruccion para continuar.
waiter: función que pausa el flujo de instrucciones.
"""
def trigger_function():
# Se imprime la instrucción para detonar el activador
print(trigger_message)
waiter()
# Se reproduce un audio confirmando que el activador fue
# detonado.
reproducir_audio(TRIGGER_AUDIO_PATH)
return trigger_function | b389dd93631ae396c65d5653da6cea3ec91b3556 | 3,656,001 |
import numpy as np
def find_peaks(amplitude):
    """
    A value is considered to be a peak if it is at least as high as its four
    closest neighbours (two on each side).
    """
# Pad the array with -1 at the beginning and the end to avoid overflows.
padded = np.concatenate((-np.ones(2), amplitude, -np.ones(2)))
# Shift the array by one/two values to the left/right
shifted_l2 = padded[:-4]
shifted_l1 = padded[1:-3]
shifted_r1 = padded[3:-1]
shifted_r2 = padded[4:]
# Compare the original array with the shifted versions.
peaks = ((amplitude >= shifted_l2) & (amplitude >= shifted_l1) &
(amplitude >= shifted_r1) & (amplitude >= shifted_r2))
return peaks | 192f25bbc491c7e880ff5363098b0ced29f37567 | 3,656,002 |
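A quick check of find_peaks on a small array (values are illustrative):
import numpy as np
amplitude = np.array([0.0, 1.0, 3.0, 1.0, 0.0, 2.0, 0.5])
print(find_peaks(amplitude))
# -> [False False  True False False  True False]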
from typing import Optional
def sync(
*,
client: Client,
json_body: CustomFieldOptionsCreateRequestBody,
) -> Optional[CustomFieldOptionsCreateResponseBody]:
"""Create Custom Field Options
Create a custom field option. If the sort key is not supplied, it'll default to 1000, so the option
appears near the end of the list.
Args:
json_body (CustomFieldOptionsCreateRequestBody): Example: {'custom_field_id':
'01FCNDV6P870EA6S7TK1DSYDG0', 'sort_key': 10, 'value': 'Product'}.
Returns:
Response[CustomFieldOptionsCreateResponseBody]
"""
return sync_detailed(
client=client,
json_body=json_body,
).parsed | 6215e704be4bbc32e52fb03817e00d7fd5338365 | 3,656,003 |
def decrypt_with_private_key(data, private_key):
"""Decrypts the PKCS#1 padded shared secret using the private RSA key"""
return _pkcs1_unpad(private_key.decrypt(data)) | f1dac9113fb97f62afab524239e38c6cb196c989 | 3,656,004 |
import warnings
from functools import wraps
def deprecated(func):
"""
This is a decorator which can be used to mark functions as deprecated. It
will result in a warning being emitted when the function is used.
:param func: original function
:type func: :any:`collections.Callable`
:return: decorated func
:rtype: :any:`collections.Callable`
"""
@wraps(func)
def newFunc (*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return newFunc | ba237c30d97013080bd84569af1817685023dab6 | 3,656,006 |
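Typical usage of the deprecated decorator (function name is hypothetical):
@deprecated
def old_api(x):
    return x * 2
old_api(3)  # issues DeprecationWarning: "Call to deprecated function old_api."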
import re
def prediction():
"""
A function that takes a JSON with two fields: "text" and "maxlen"
Returns: the summarized text of the paragraphs.
"""
print(request.form.values())
paragraphs = request.form.get("paragraphs")
paragraphs = re.sub("\d+", "", paragraphs)
maxlen = int(request.form.get("maxlen"))
summary = summarizer(paragraphs, max_length=maxlen, min_length=49, do_sample=False)
return render_template('index.html', prediction_text = '" {} "'.format(summary[0]["summary_text"])), 200 | a1bdf996908e65e3087ed4ffe27402c9763b4d69 | 3,656,007 |
def is_reviewer(user):
"""Return True if this user is a financial aid reviewer"""
# no need to cache here, all the DB lookups used during has_perm
# are already cached
return user.has_perm("finaid.review_financial_aid") | e3c599f78eb51c33ab48e3760c0f2965ba305916 | 3,656,008 |
from subprocess import check_output
def getLogMessage(commitSHA):
    """Get the log message for a given commit hash (returned as bytes under Python 3)."""
    output = check_output(["git", "log", "--format=%B", "-n", "1", commitSHA])
return output.strip() | 2d42e587da57faff5366fc656e8d45a8fa797208 | 3,656,009 |
def sup(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", translate: str=""):
"""
Returns superscript.\n
`content`: Contents of the superscript.\n
"""
g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
dir_, draggable, hidden, id_, lang, spellcheck, style,
tabindex, title, translate)
return f"<sup {g_args}>{content}</sup>\n" | dff8635d98f68e5b024fe23cbeaa6a1a9884222f | 3,656,011 |
def isnonempty(value):
"""
Return whether the value is not empty
Examples::
>>> isnonempty('a')
True
>>> isnonempty('')
False
:param value: string to validate whether value is not empty
"""
return value != '' | 0250cb455d8f77027d5cde9101a24683950bbdb2 | 3,656,012 |
def InstallSystem(config, deployment, options):
"""Install the local host from the sysync deployment configuration files."""
installed = {}
# Create fresh temporary directory
Log('Clearing temporary deployment path: %s' % config['deploy_temp_path'])
run.Run('/bin/rm -rf %s' % config['deploy_temp_path'])
run.Run('/bin/mkdir -p %s' % config['deploy_temp_path'])
# Install the packages
result = InstallPackagesLocally(config, deployment, options)
return result | 642eda86228e5575bc7267d9b3a5c3ddc055daf4 | 3,656,013 |
def preprocess_input(x):
"""前処理。"""
return tf.keras.applications.imagenet_utils.preprocess_input(x, mode="torch") | 6795c5e571d67a7908edbe3c3ca0ed5e3412d2f0 | 3,656,014 |
from math import floor, isclose
def attribute_to_partner_strict(partner, partner_string_or_spec, amount):
"""Return the amount attributable to the given partner."""
spec = (
partner_string_or_spec
if isinstance(partner_string_or_spec, dict)
else parse_partner_string(partner_string_or_spec)
)
if partner not in spec:
raise ValueError("Partner not found in partner string: %s" % partner)
v100 = spec[partner] * float(amount.abs())
f_floor = round if isclose(v100, round(v100)) else floor
v = amount.sign() * 0.01 * f_floor(v100)
return Amount(str(v)).with_commodity(amount.commodity) | d7e00b50e8be010d7896b6c51e1e3fcfe73438d2 | 3,656,015 |
import math
import cv2
import numpy as np
def drawLines(img, lines, color=(255,0,0)):
"""
Draw lines on an image
"""
centroids = list()
r_xs = list()
r_ys = list()
for line_ in lines:
for rho,theta in line_:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
            # Guard against perfectly vertical lines (x1 == x0)
            if x1 == x0:
                angle = 90.0
            else:
                slope = (y1 - y0) / float(x1 - x0)
                angle = math.degrees(math.atan(slope))
if abs(angle) > 80:
# print(img.shape[1])
h_layout = line((0, 0), (img.shape[1], 0))
h_layout_lower = line((0, img.shape[0]), (img.shape[1], img.shape[0]))
r = intersection2(h_layout, line((x1, y1), (x2, y2)))
r_lower = intersection2(h_layout_lower, line((x1, y1), (x2, y2)))
# cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
# cv2.line(img, (int(r[0]), int(r[1])), (int(r_lower[0]), int(r_lower[1])), color, 2)
# print('min(r, r_lower), max(r, r_lower) :', np.min(np.array([r, r_lower])), np.max(np.array([r, r_lower])))
                # The allowed min/max range for these values must be set explicitly. #
if np.min(np.array([r, r_lower])) >= 0 and np.max(np.array([r, r_lower])) < max(img.shape):
center_p = (int((r[0] + r_lower[0]) / 2), int((r[1] + r_lower[1])/ 2))
centroids.append(center_p)
r_xs.append((r[0], r_lower[0]))
r_ys.append((r[1], r_lower[1]))
# cv2.circle(img, center_p, 10, (255, 0, 255), -1)
# cv2.line(img, (int(0), int(0)), (int(0), int(img.shape[0])), color, 2)
# cv2.line(img, (int(img.shape[1]), int(0)), (int(img.shape[1]), int(img.shape[0])), color, 2)
# cv2.circle(img, (0, int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
# cv2.circle(img, (img.shape[1], int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
centroids.append((0, int(img.shape[0] / 2)))
centroids.append((img.shape[1], int(img.shape[0] / 2)))
return r_xs, r_ys, centroids | 5918bb1a81d8efae2874f294d927f7b01527d1d1 | 3,656,016 |
import numpy
def moments_of_inertia(geo, amu=True):
""" principal inertial axes (atomic units if amu=False)
"""
ine = inertia_tensor(geo, amu=amu)
moms, _ = numpy.linalg.eigh(ine)
moms = tuple(moms)
return moms | 34153dba5ea49d457ee97d4024a103b0d05c6bd0 | 3,656,017 |
from datetime import timedelta
def greenblatt_earnings_yield(stock, date=None, lookback_period=timedelta(days=0), period='FY'):
"""
:param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
:param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, date=None.
:param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
:param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
:return: .. math:: \\text{Greenblatt Earnings Yield} = \\frac{\\text{EBIT}}{\\text{EV}}
"""
return earnings_before_interest_and_taxes(stock=stock, date=date, lookback_period=lookback_period, period=period) \
/ enterprise_value(stock=stock, date=date, lookback_period=lookback_period, period=period) | 333b12b609523ab16eeb1402d0219264a3a159e3 | 3,656,018 |
import shutil
from os.path import isdir
def remove_directory(dir_path):
"""Delete a directory"""
if isdir(dir_path):
try:
shutil.rmtree(dir_path)
return ok_resp(f'Directory removed {dir_path}')
except TypeError as err_obj:
return err_resp(f'Failed to remove directory. {err_obj}')
except FileNotFoundError as err_obj:
return err_resp(f'Directory not found: {err_obj}')
        # PermissionError is a subclass of OSError, so it must be caught first
        except PermissionError as err_obj:
            return err_resp(f'Failed to delete directory: {err_obj}')
        except OSError as err_obj:
            return err_resp(f'Failed to delete directory: {err_obj}')
return ok_resp(f'Not a directory {dir_path}') | c174568c024cff1948bdf78206e49c2ca40c6b25 | 3,656,019 |
import numpy as np
def set_route_queue(path_list, user_position, sudden_id, sudden_xy, pi):
    """
    Route the doctor closest to the last (sudden) patient so that they make the visit.
    """
minimum_dis = 100
minimum_idx = 0
for i in range(len(path_list)):
dis = np.sqrt((user_position[path_list[i][-2]][0] - sudden_xy[0])**2 + (user_position[path_list[i][-2]][1] - sudden_xy[1])**2)
if(dis < minimum_dis):
minimum_dis = dis
minimum_idx = path_list[i][-2]
pi_idx = [i for i, x in enumerate(pi) if x == minimum_idx]
pi.insert(pi_idx[0]+1,sudden_id)
return pi | 0425c3edf2d488680ccb54661e79698a506e4fe4 | 3,656,023 |
def add(x, y):
"""Add two numbers together."""
return x+y | 92015156eac5bc9cc0be3b1812f9c0766f23020c | 3,656,024 |
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def retry_session(tries=2,
backoff_factor=0.1,
status_forcelist=(500, 502, 504),
session=None):
"""
Parameters
----------
    tries : int, number of retries.
backoff_factor : A backoff factor to apply between attempts after the
second try (most errors are resolved immediately by a second try without
a delay). urllib3 will sleep for: {backoff factor} * (2 ^ ({number of
total retries} - 1)) seconds. If the backoff_factor is 0.1, then sleep()
will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be
longer than Retry.BACKOFF_MAX.
status_forcelist :
Retries are made on any HTTP responses in this list. Default values
include the following:
- 500: Internal Server Error.
- 502: Bad Gateway.
- 504: Gateway Timeout.
    session : requests.Session, optional
        Existing session to configure; a new one is created when None.
    Returns
    -------
    requests.Session
        Session with a retrying HTTPAdapter mounted for http:// and https://.
    """
session = session or requests.Session()
retry = Retry(
total=tries,
read=tries,
connect=tries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry, pool_block=True)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | 5766c4623e0e53f4353de1e58080c1ad5c9b4080 | 3,656,026 |
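Example use of retry_session (URL is illustrative):
session = retry_session(tries=3, backoff_factor=0.2)
response = session.get("https://example.com/api/health", timeout=5)
print(response.status_code)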
import numpy as np
def vc(t, delta, beta):
    """Velocity correlation of a locus on a Rouse polymer. beta = alpha/2."""
return ( np.power(np.abs(t - delta), beta)
+ np.power(np.abs(t + delta), beta)
- 2*np.power(np.abs(t), beta)
)/( 2*np.power(delta, beta) ) | 89eff8a8cdb0e84a69e7990ebf0c128ca27ecea8 | 3,656,027 |
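In formula form, the correlation computed by vc is (transcribing the code directly):
C_v(t; \delta) = \frac{|t-\delta|^{\beta} + |t+\delta|^{\beta} - 2\,|t|^{\beta}}{2\,\delta^{\beta}}, \qquad \beta = \alpha/2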
def algorithm(name):
"""
A function decorator that is used to add an algorithm's Python class to the
algorithm_table.
    Args:
        name: A human readable label for the algorithm that is used to
            identify it in the GUI.
    """
def decorator(class_):
algorithm_table[name] = class_
return class_
return decorator | 8f67fec3f1933dc0ea041322fcf041f2247bc638 | 3,656,028 |
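Registering a class with the algorithm decorator might look like this (class and key names are hypothetical):
@algorithm("K-Means clustering")
class KMeansAlgorithm:
    def run(self, data):
        ...
# algorithm_table["K-Means clustering"] is KMeansAlgorithm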
def comp_easy():
"""Get easy components."""
return Components(ewlaps, gi_setting.DEFAULT_EASY) | d15093dce67657b05665d7a9373d1328b4171f91 | 3,656,029 |
def play(player1, player2, rounds=1, verbose=False, symdict=None):
"""Play a number of `rounds` matches between the two players and return
    the score $S = \sum_j a_j$, where
    $a_j = 1$ if player1 won, $-1$ if player2 won, and $0$ otherwise.
"""
if player1 is player2:
raise AttributeError("Players match...")
if player1._rules is not player2._rules:
raise AttributeError("Different rules sets...")
if symdict is None:
        symdict = range(len(player1._rules))
score = [0, 0, 0]
results = ["Player1 wins.", "Tie.", "Player 2 wins."]
    playiter = range(rounds) if verbose else Progress(range(rounds))
for i in playiter:
res1, res2 = player1.play(), player2.play()
player1._memory.append((res1, res2))
player2._memory.append((res2, res1))
resind = 1 - player1._rules[res1][res2]
score[resind] += 1
if verbose:
print("{} vs {}: {}".format(symdict[res1], symdict[res2],
results[resind]))
print(score)
return score | 28e0cc41d664a6681b4af1216d0de6f1a2871f04 | 3,656,030 |
import numpy as np
from Bio import SeqIO
def calc_deltabin_3bpavg(seq, files, bin_freqs, seqtype="fastq"):
"""
At each position (starting at i), count number of sequences where
region (i):(i+3) is mutated. This is sort of a rolling average and not critical
to the result. It just ends up a bit cleaner than if we looked at a single
base pair since. We are assuming that on average a mutation messes up binding,
however this is not always the case. For example, especially with RNAP, there might
be a couple positions that are not-at-all optimal for DNA binding.
    Parameters
    ----------
seq: wild-type sequence of library region
files: filenames (used to identify bin number, '...bin*.fastq')
bin_freqs: numpy array (np.zeros([# bins, # letters (i.e. 4),
length sequence]) that contained the letter frequences from each
bin.
seqtype: sequence file type (i.e. '.fastq' or '.fasta')
Returns
-------
    avgBin_counts: array of shape (len(files), seqLength); contains counts used to
        calculate the average bin of mutated nucleotides at each position.
avgBin-avgbin_WT: average bin of mutated nucleotides at each position
relative to wild-type average bin.
"""
seqLength = len(seq)
avgBin_counts = np.zeros([len(files),seqLength])
avgBin = np.zeros(seqLength)
#filecount = 0
avgbin_WT = 0
for j in range(0,len(files)):
avgbin_WT += ( (j+1)*bin_freqs[j,:,0].sum() )/ bin_freqs[:,:,0].sum()
print('average_bin_WT', avgbin_WT)
for i in range(0,seqLength-2):
for j, fname in enumerate(files):
count = 0
binnumber = int(fname[-7]) - 1
for rec in SeqIO.parse(fname, seqtype):
if (rec.seq[i:(i+2)] != seq[i:(i+2)]):
count += 1
avgBin_counts[binnumber,i] = count
for i in range(0,seqLength-2):
for j in range(0,len(files)):
avgBin[i] += ( (j+1)*avgBin_counts[j,i] )/avgBin_counts[:,i].sum()
return avgBin_counts, (avgBin-avgbin_WT) | 5ea614e7280d6ed288ea03e63e86e3129d4e4994 | 3,656,031 |
import numpy as np
import numpy.linalg as nla
def make_right_handed(l_csl_p1, l_p_po):
"""
The function makes l_csl_p1 right handed.
Parameters
----------------
l_csl_p1: numpy.array
The CSL basis vectors in the primitive reference frame of crystal 1.
l_p_po: numpy.array
The primitive basis vectors of the underlying lattice in the orthogonal
reference frame.
Returns
-----------
t1_array: numpy.array
Right handed array
"""
l_csl_po1 = l_p_po.dot(l_csl_p1)
t1_array = np.array(l_csl_p1, dtype='double')
t2_array = np.array(l_csl_p1, dtype='double')
if (nla.det(l_csl_po1) < 0):
t1_array[:, 0] = t2_array[:, 1]
t1_array[:, 1] = t2_array[:, 0]
return t1_array | 3b5e3f21e6da5292fb84eb632ddcfa2ec52507ee | 3,656,032 |
def company(anon, obj, field, val):
"""
Generates a random company name
"""
return anon.faker.company(field=field) | 95580147817a37542f75e2c728941a159cd30bd3 | 3,656,034 |
def delete_schedule():
"""
    On a GET request, returns the page for deleting a schedule.
    On a POST request, deletes the selected schedule
    (the delete request comes from the main page (func index); this view has no template of its own).
"""
if not check_admin_status():
flash(f'У вас нет прав для просмотра данной страницы!', 'error')
app.logger.warning(f"Сотрудник с недостаточным уровнем допуска попытался удалить расписание: {get_user_info()}")
return redirect(url_for('index'))
schedule_id = request.args.get('schedule_id')
ScheduleCleaning.query.filter_by(id=schedule_id).delete()
db.session.commit()
return redirect(url_for('index')) | 1e73c757956d4bd78f3a093e2a2ddfde894aeac5 | 3,656,035 |
def map_datapoint(data_point: DATAPOINT_TYPE) -> SFX_OUTPUT_TYPE:
"""
Create dict value to send to SFX.
:param data_point: Dict with values to send
:type data_point: dict
:return: SignalFx data
:rtype: dict
"""
return {
"metric": data_point["metric"],
"value": data_point["value"],
"dimensions": dict(data_point["dimensions"], **default_dimensions) if "dimensions" in data_point else default_dimensions,
} | cf5d7eb1bded092adb2b002ee93ad168e696230a | 3,656,038 |
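A sketch of calling map_datapoint; default_dimensions is assumed to be defined at module level:
point = {"metric": "cpu.load", "value": 0.42, "dimensions": {"host": "web-1"}}
sfx_point = map_datapoint(point)
# sfx_point["dimensions"] merges {"host": "web-1"} with default_dimensions (defaults win on conflicts)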
def write_obs(mdict, obslist, flag=0):
"""
"""
# Print epoch
epoch = mdict['epoch']
res = epoch.strftime("> %Y %m %d %H %M %S.") + '{0:06d}0'.format(int(epoch.microsecond))
# Epoch flag
res += " {0:2d}".format(flag)
# Num sats
res += " {0:2d}".format(len(mdict)-1)
res += '\n'
# For each satellite, print obs
for sat in mdict:
if sat == 'epoch':
continue
res += sat
obstypes = obslist[sat[0]]
for o in obstypes:
try:
meas = mdict[sat][o]
except KeyError:
meas = 0.0
# BeiDou satellites can have long ranges if GEO satellites are used
if meas > 40e6:
meas = 0.0
res += '{0:14.3f}00'.format(meas)
res += '\n'
return res | 5a91b02fce07f455f4442fe6fbf76d3609f5a74e | 3,656,039 |
from typing import Optional
from typing import Union
import fsspec
def open_view(
path: str,
*,
filesystem: Optional[Union[fsspec.AbstractFileSystem, str]] = None,
synchronizer: Optional[sync.Sync] = None,
) -> view.View:
"""Open an existing view.
Args:
path: View storage directory.
filesystem: The file system used to access the view.
synchronizer: The synchronizer used to synchronize the view.
Returns:
The opened view.
Example:
>>> view = open_view("/home/user/myview")
"""
return view.View.from_config(path,
filesystem=filesystem,
synchronizer=synchronizer) | b12471f59ef78e444a43c0e766cb6b4237e65338 | 3,656,040 |
def smi2xyz(smi, forcefield="mmff94", steps=50):
"""
Example:
utils.smi2xyz("CNC(C(C)(C)F)C(C)(F)F")
returns:
C 1.17813 0.06150 -0.07575
N 0.63662 0.20405 1.27030
C -0.86241 0.13667 1.33270
C -1.46928 -1.21234 0.80597
C -0.94997 -2.44123 1.55282
C -2.99527 -1.22252 0.74860
F -1.08861 -1.36389 -0.50896
C -1.34380 0.44926 2.78365
C -0.84421 1.76433 3.34474
F -2.70109 0.48371 2.84063
F -0.94986 -0.53971 3.63106
H 0.78344 0.82865 -0.74701
H 0.99920 -0.92873 -0.50038
H 2.26559 0.18049 -0.03746
H 1.03185 -0.51750 1.87094
H -1.24335 0.93908 0.68721
H -1.29943 -2.47273 2.58759
H -1.27996 -3.36049 1.05992
H 0.14418 -2.47324 1.55471
H -3.35862 -0.36599 0.16994
H -3.34471 -2.11983 0.22567
H -3.46364 -1.21709 1.73400
H -1.20223 2.60547 2.74528
H -1.22978 1.89248 4.36213
H 0.24662 1.79173 3.40731
"""
mol = pybel.readstring("smi", smi)
    mol.addh()  # add hydrogens; without this call, pybel outputs an xyz string with no hydrogens.
mol.make3D(forcefield=forcefield, steps=steps)
# possible forcefields: ['uff', 'mmff94', 'ghemical']
mol.localopt()
return _to_pyscf_atom(mol) | 083bbc1a242a3f5f247fc6f7066e099dab654b7a | 3,656,041 |
from typing import Optional
from typing import Tuple
from typing import List
def pgm_to_pointcloud(
depth_image: np.ndarray, color_image: Optional[np.ndarray],
intrinsics: Tuple[float, float, float, float],
distortion: List[float]) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Fast conversion of opencv images to pointcloud.
Takes ~7 ms per 1280x720 RGBD on my corp laptop (hirak).
Args:
depth_image: OpenCV image.
color_image: Corresponding color image, if colors for each point is desired.
intrinsics: fx, fy, cx, cy.
    distortion: Standard distortion params k1, k2, p1, p2, [k3, [k4, k5, k6]].
Returns:
points: Nx3 array of points in space.
colors: Nx3 array of colors, each row an RGB. None if color_image is None.
"""
# The code below is optimized for speed, further optimizations may also be
# possible.
x_axis, y_axis = np.mgrid[0:depth_image.shape[1], 0:depth_image.shape[0]]
valid = ~np.isnan(depth_image)
x_axis = x_axis.T[valid]
y_axis = y_axis.T[valid]
depth = depth_image[valid] * _DEPTH_SCALE
x_and_y = np.vstack([x_axis, y_axis]).astype(float)
fx, fy, cx, cy = intrinsics
camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
x_and_y = cv2.undistortPoints(x_and_y, camera_matrix, np.array(distortion))
x_and_y = x_and_y.T.reshape(2, -1)
points = np.vstack([x_and_y * depth, depth]).T
colors = None
if color_image is not None:
colors = color_image[valid]
if len(colors.shape) > 1 and colors.shape[1] == 3:
# OpenCV uses BGR. Point cloud libraries like to use RGB.
colors[:, [0, 2]] = colors[:, [2, 0]]
else:
colors = np.vstack([colors, colors, colors]).T
return points, colors | 574d514c216f0db1f90bf277dc78a5b5dcc2535a | 3,656,042 |