content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def remove_invalid_chars_from_passage(passage_text):
"""
Return a cleaned passage if the passage is invalid.
If the passage is valid, return None
"""
# Check if any of the characters are invalid
bad_chars = [c for c in passage_text if c in INVALID_PASSAGE_CHARACTERS]
if bad_chars:
for b in set(bad_chars):
passage_text = passage_text.replace(b, '')
return passage_text | 5eeac3393477c45ac361fb2ccbae194c83e47f25 | 3,655,900 |
import locale
def fallback_humanize(date, fallback_format=None, use_fallback=False):
"""
Format date with arrow and a fallback format
"""
# Convert to local timezone
date = arrow.get(date).to('local')
# Set default fallback format
if not fallback_format:
fallback_format = '%Y/%m/%d %H:%M:%S'
# Determine using fallback format or not by a variable
if use_fallback:
return date.datetime.strftime(fallback_format)
try:
# Use Arrow's humanize function
lang, encode = locale.getdefaultlocale()
clock = date.humanize(locale=lang)
except:
# Notice at the 1st time only
if not dg['humanize_unsupported']:
dg['humanize_unsupported'] = True
printNicely(
light_magenta('Humanized date display method does not support your $LC_ALL.'))
# Fallback when LC_ALL is not supported
clock = date.datetime.strftime(fallback_format)
return clock | 06a758cea23978d877d12cfead25b21140370094 | 3,655,901 |
import os
import time
from datetime import datetime
import tarfile
def questbackup(cfg, server):
"""Silly solution to a silly problem."""
if server not in cfg['servers']:
log.warning(f'{server} has been misspelled or not configured!')
elif 'worldname' not in cfg['servers'][server]:
log.warning(f'{server} has no world directory specified!')
elif 'questing' not in cfg['servers'][server]:
log.warning(f'{server} is not set up for questing backup!')
else:
bpath = cfg['backupspath']
world = cfg['servers'][server]['worldname']
quests = cfg['servers'][server]['questing']
log.info(f'Starting backup for {server}\'s quests...')
if isUp(server):
log.info(f'{server} is running, don\'t care, just want {quests.upper()}!')
sbpath = f'{bpath}/{server}/questing/{quests}'
try:
os.makedirs(sbpath, exist_ok=True)
except Exception as e:
log.error(f'{e}\nBackup aborted, DANGER! might lose quests!')
return False
else:
log.info('Created missing directories! (if they were missing)')
log.info('Deleting old quest backups...')
now = time.time()
with os.scandir(sbpath) as d:
for entry in d:
if not entry.name.startswith('.') and entry.is_file():
stats = entry.stat()
if stats.st_mtime < now - (10080 * 60):
try:
os.remove(entry.path)
except OSError as e:
log.error(e)
else:
log.info(f'Deleted {entry.path} for being too old!')
log.info('Creating quest backup...')
bname = datetime.now().strftime('%Y.%m.%d-%H-%M-%S') + f'-{server}-{world}-{quests.replace("/", "_")}.tar.gz'
os.chdir(sbpath)
serverpath = cfg['serverspath']
with tarfile.open(bname, 'w:gz') as tf:
tf.add(f'{serverpath}/{server}/{world}/{quests}', quests)
log.info('Quest backup created!')
if isUp(server):
log.info(f'{server} is running, STILL DON\'T CARE!')
log.info('DONE!') | 35406f7349665f36880f9ca1036f420a2035fb9d | 3,655,902 |
import sys
import os
def find_file_in_pythonpath( filename, subfolder='miri',
walkdir=False, path_only=False):
"""
Find a file matching the given name within the PYTHONPATH.
:Parameters:
filename: str
The name of the file to be located.
subfolder: str, optional (default='miri')
If specified, a preferred subfolder within the PYTHONPATH which,
if it exists, will be searched before the top level directory.
walkdir: bool, optional (default=False)
Set to True if the entire directory tree under each of the
directories in the search path should be searched as well.
By default, only the specific directories listed in the search
path are searched.
N.B. Specifying walkdir=True can slow down a file search
significantly, especially if there are a lot of utilities
installed in PYTHONPATH. The search can be speeded up by
specifying a preferred subfolder.
path_only: bool, optional (default=False)
Set to True to return the path to the folder containing the file
rather than the path to the file itself.
:Returns:
filepath: str
The full path (and name) of the matching file.
If no file is found an empty string is returned.
"""
# Step through each path in the PYTHONPATH
for path in sys.path:
# If there is a preferred subfolder, and it exists
# search this first.
if subfolder:
sfname = os.path.join(path, subfolder)
if os.path.isdir(sfname):
result = _check_folder(sfname, filename, walkdir=walkdir,
path_only=path_only)
if result:
return result
result = _check_folder(path, filename, walkdir=walkdir,
path_only=path_only)
if result:
return result
# No file found - return an empty string.
return '' | 27dc5aebe74af45edeafdba4a3a43f844606e06e | 3,655,903 |
from functools import reduce
def min_column_widths(rows):
"""Computes the minimum column width for the table of strings.
>>> min_column_widths([["some", "fields"], ["other", "line"]])
[5, 6]
"""
def lengths(row): return map(len, row)
def maximums(row1, row2) : return map(max, row1, row2)
return list(reduce(maximums, map(lengths, rows))) | 36722e4250dde561836c1ea3042b796ed7650986 | 3,655,904 |
def entities(address_book):
"""Get the entities utility."""
return zope.component.getUtility(IEntities) | 6c64c5c8b8d0048425dcd91baf265134fbb2e96e | 3,655,905 |
from renku.core.management.migrations.models.v9 import Project
import pathlib
from urllib.parse import quote, urlparse
def generate_dataset_file_url(client, filepath):
"""Generate url for DatasetFile."""
if not client:
return
try:
if not client.project:
return
project = client.project
except ValueError:
metadata_path = client.renku_path.joinpath(OLD_METADATA_PATH)
project = Project.from_yaml(metadata_path)
project_id = urlparse(project._id)
else:
project_id = urlparse(project.id)
filepath = quote(filepath, safe="/")
path = pathlib.posixpath.join(project_id.path, "files", "blob", filepath)
project_id = project_id._replace(path=path)
return project_id.geturl() | 1aa3a97cfff523e0b7d7718c39dfb9935160e193 | 3,655,906 |
def _check_attrs(obj):
"""Checks that a periodic function/method has all the expected attributes.
This will return the expected attributes that were **not** found.
"""
missing_attrs = []
for attr_name in _REQUIRED_ATTRS:
if not hasattr(obj, attr_name):
missing_attrs.append(attr_name)
return missing_attrs | 6a3326616aa5d1cd083f99a2e0f4c57f6f5a11c6 | 3,655,907 |
def TokenStartBlockElement(block):
"""
`TokenStartBlockElement` is used to denote that we are starting a new block element.
Under most circumstances, this token will not render anything.
"""
return {
"type": "SpaceCharacters",
"data": "",
"_md_type": mdTokenTypes["TokenStartBlockElement"],
"_md_block": block,
} | c7690b2ca7babc0cc5d6e36a8b8ecb33ad463294 | 3,655,908 |
import json
def parse_json(json_path):
"""
Parse training params json file to python dictionary
:param json_path: path to training params json file
:return: python dict
"""
with open(json_path) as f:
d = json.load(f)
return d | c34b241813996a8245ea8c334de72f0fbffe8a31 | 3,655,909 |
from subprocess import call
from Bio.SeqIO import parse
def cut_tails(fastq, out_dir, trimm_adapter, trimm_primer, hangF, hangR):
"""
The function trims adapter/barcode sequences (with porechop) and primer overhangs from reads in a fastq file.
Parameters
----------
fastq : str
path to the input fastq file
out_dir : str
path to the output directory
trimm_adapter : bool
whether to trim adapters/barcodes
trimm_primer : bool
whether to trim primer overhangs
hangF : str
sequence of the forward primer overhang
hangR : str
sequence of the reverse primer overhang
Returns
-------
str
path to the trimmed fastq file
"""
output = fastq
# cut barcodes
if trimm_adapter == True:
call('porechop -i {} --verbosity 0 -t 100 --require_two_barcodes --extra_end_trim 0 -o {}/trimmed_barcode.fastq'.format(fastq, out_dir), shell=True)
fastq = out_dir + "/trimmed_barcode.fastq"
output = out_dir + "/trimmed_barcode.fastq"
# cut primers
if trimm_primer == True:
opn_fastq = parse(fastq, 'fastq')
# cut primers
with open('{}/trimmed_primer.fastq'.format(out_dir), 'w') as trimmed_fasta:
for record in opn_fastq:
for idx in range(4):
if idx not in (1, 3):
trimmed_fasta.write(record.format('fastq').split('\n')[idx] + '\n')
else:
trimmed_fasta.write(record.format('fastq').split('\n')[idx][len(hangF): -len(hangR)] + '\n')
output = '{}/trimmed_primer.fastq'.format(out_dir)
return output | 8e4ef0b24d5ecf22aa298a0e4e8cddeb7d681945 | 3,655,910 |
from typing import Callable
from typing import Any
from typing import Optional
def incidentReports(
draw: Callable[..., Any],
new: bool = False,
event: Optional[Event] = None,
maxNumber: Optional[int] = None,
beforeNow: bool = False,
fromNow: bool = False,
) -> IncidentReport:
"""
Strategy that generates :class:`IncidentReport` values.
"""
automatic: Optional[bool]
if new:
number = 0
automatic = False
else:
number = draw(incidentNumbers(max=maxNumber))
automatic = None
if event is None:
event = draw(events())
return IncidentReport(
event=event,
number=number,
created=draw(dateTimes(beforeNow=beforeNow, fromNow=fromNow)),
summary=draw(incidentReportSummaries()),
incidentNumber=None, # FIXME: May allow some to be passed in?
reportEntries=draw(
lists(
reportEntries(
automatic=automatic, beforeNow=beforeNow, fromNow=fromNow
)
)
),
) | 6f81cd1294543572605d6bb477e3955be6dc122d | 3,655,911 |
def load_spelling(spell_file=SPELLING_FILE):
"""
Load the term_freq from spell_file
"""
with open(spell_file, encoding="utf-8") as f:
tokens = f.read().split('\n')
size = len(tokens)
term_freq = {token: size - i for i, token in enumerate(tokens)}
return term_freq | 236cb5306632990e1eefcf308dea224890ccd035 | 3,655,912 |
def NotP8():
"""
Return the matroid ``NotP8``.
This is a matroid that is not `P_8`, found on page 512 of [Oxl1992]_ (the
first edition).
EXAMPLES::
sage: M = matroids.named_matroids.P8()
sage: N = matroids.named_matroids.NotP8()
sage: M.is_isomorphic(N)
False
sage: M.is_valid()
True
"""
A = Matrix(GF(3), [
[1, 0, 0, 0, 0, 1, 1, -1],
[0, 1, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 0, 1],
[0, 0, 0, 1, -1, 1, 1, 1]
])
M = TernaryMatroid(A, 'abcdefgh')
M.rename('NotP8: ' + repr(M))
return M | d475810244338532f4611b120aa15b4776bd2aeb | 3,655,913 |
def eqv(var_inp):
"""Returns the von-mises stress of a Field or FieldContainer
Returns
-------
field : ansys.dpf.core.Field, ansys.dpf.core.FieldContainer
The von-mises stress of this field. Output type will match input type.
"""
if isinstance(var_inp, dpf.core.Field):
return _eqv(var_inp)
elif isinstance(var_inp, dpf.core.FieldsContainer):
return _eqv_fc(var_inp)
# elif isinstance(var_inp, dpf.core.Operator):
# return _eqv_op(var_inp)
else:
raise TypeError('Input type must be a Field or FieldsContainer') | 5977b1317fc5bfa43c796b95680a7b2a21ae4553 | 3,655,914 |
def sort(array: list[int]) -> list[int]:
"""Counting sort implementation.
"""
result: list[int] = [0, ] * len(array)
low: int = min(array)
high: int = max(array)
count_array: list[int] = [0 for i in range(low, high + 1)]
for i in array:
count_array[i - low] += 1
for j in range(1, len(count_array)):
count_array[j] += count_array[j - 1]
for k in reversed(array):
result[count_array[k - low] - 1] = k
count_array[k - low] -= 1
return result | 86864db6e012d5e6afcded3365d6f2ca35a5b94b | 3,655,915 |
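A minimal usage sketch for the counting sort above; the sample list is illustrative and not part of the dataset entry.
data = [4, 2, 2, 8, 3, 3, 1]
print(sort(data)) # expected output: [1, 2, 2, 3, 3, 4, 8]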
def build_updated_figures(
df, colorscale_name
):
"""
Build all figures for dashboard
Args:
- df: census 2010 dataset (cudf.DataFrame)
- colorscale_name
Returns:
tuple of figures in the following order
(education_histogram, income_histogram,
cow_histogram, age_histogram)
"""
colorscale_transform = 'linear'
education_histogram = build_histogram_default_bins(
df, 'education', 'v', colorscale_name, colorscale_transform
)
income_histogram = build_histogram_default_bins(
df, 'income', 'v', colorscale_name, colorscale_transform
)
cow_histogram = build_histogram_default_bins(
df, 'cow', 'v', colorscale_name, colorscale_transform
)
age_histogram = build_histogram_default_bins(
df, 'age', 'v', colorscale_name, colorscale_transform
)
return (
education_histogram, income_histogram,
cow_histogram, age_histogram,
) | 01dd0f298f662f40919170b4e37c533bd3ba443b | 3,655,916 |
import os
def sacct():
"""
Wrapper around the slurm "sacct" command. Returns an object, and each
property is the (unformatted) value according to sacct.
Would also work with .e.g:
# with open("/home/Desktop/sacct.txt", "r") as file:
:return: SacctWrapper object, with attributes based on the output of sacct
"""
with os.popen("sacct -j $SLURM_JOB_ID -l --parsable2") as file:
return SacctWrapper(file) | df87679b9448ff340d1113af1d4b157e09b777b8 | 3,655,917 |
from typing import Optional
def get_or_else_optional(optional: Optional[_T], alt_value: _T) -> _T:
"""
General-purpose getter for `Optional`. If it's `None`, returns the `alt_value`.
Otherwise, returns the contents of `optional`.
"""
if optional is None:
return alt_value
return optional | 340fc67adc9e73d748e3c03bec9d20e1646e894c | 3,655,918 |
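A short illustrative example of the Optional helper above (assumes the _T TypeVar is defined alongside the function, as in the original module).
print(get_or_else_optional(None, 'fallback')) # -> 'fallback'
print(get_or_else_optional('value', 'fallback')) # -> 'value'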
def create_dic(udic):
"""
Create a glue dictionary from a universal dictionary
"""
return udic | aa854bb8f4d23da7e37aa74727446d7436524fe2 | 3,655,919 |
import sys
import numpy as np
def get_isotopes(loop):
"""
Given a text data loop, usually the very last one in the save frame, find and return the isotopes for naming.
Example output: [ '15N', '1H' ]
"""
# = = = Catch random error in giving the saveFrame instead
loop = loop_assert(loop, 'get_isotopes')
# For entries like hetNOE, the tags will be duplicated with
colsElement = get_col_tag_startswith(loop, 'Atom_type')
if np.any( [ loop[0][x] == '.' for x in colsElement] ):
# = = Entry is incomplete. Get name as backup.
print("= = WARNING: Entry does not contain Atom_type information. Using Atom_ID as backup.")
colsElement = get_col_tag_startswith(loop, 'Atom_ID')
listElement = [ loop[0][x] for x in colsElement]
#print( listElement )
for c in colsElement:
if not is_column_identical(loop,c):
print("= = ERROR in get isotopes(): the column entries for the isotopes are not identical!", file=sys.stderr)
return None
colsIsotopeNum = get_col_tag_startswith(loop,'Atom_isotope_number')
for c in colsIsotopeNum:
if not is_column_identical(loop,c):
print("= = ERROR in get isotopes(): the column entries for the isotopes are not identical!", file=sys.stderr)
return None
listIsotopeNum = [ loop[0][x] for x in colsIsotopeNum ]
if np.any( np.array(listIsotopeNum) == '.' ):
# = = Entry is incomplete. Get IDs
print("= = WARNING: Entry does not contain Atom_isotope_number information. Will guess using atom type information.")
listIsotopeNum = []
for x in listElement:
if x == 'H':
listIsotopeNum.append('1')
elif x == 'C':
listIsotopeNum.append('13')
elif x == 'N':
listIsotopeNum.append('15')
elif x == 'O':
listIsotopeNum.append('17')
else:
print("= = ERROR: Atom types is not H C, N, or O. Will bail.", file=sys.stderr)
sys.exit(1)
out=[]
for a,b in zip(listIsotopeNum, listElement):
out.append( "%s%s" % (a, b) )
return out | 22510e054def347cb0138d6a8f685afef666cb47 | 3,655,920 |
import inquirer
def select_user(query_message, mydb):
"""
Prompt the user to select from a list of all database users.
Args:
query_message - The messages to display in the prompt
mydb - A connected MySQL connection
"""
questions = [
inquirer.List('u',
message=query_message,
choices=list_users(mydb)
)
]
return inquirer.prompt(questions)['u'] | 8ab2adb27f73b5581bc48c8cc4cbc2888a21753f | 3,655,921 |
import os
import logging
def runTool(plugin_name, config_dict=None, user=None, scheduled_id=None,
caption=None, unique_output=True):
"""Runs a tool and stores this "run" in the :class:`evaluation_system.model.db.UserDB`.
:type plugin_name: str
:param plugin_name: name of the referred plugin.
:type config_dict: dict or metadict
:param config_dict: The configuration used for running the tool. If is None, the default configuration will be stored,
this might be incomplete.
:type user: :class:`evaluation_system.model.user.User`
:param user: The user starting the tool
:type scheduled_id: int
:param scheduled_id: if the process is already scheduled then put the row id here
:type caption: str
:param caption: the caption to set.
"""
plugin_name = plugin_name.lower()
if user is None:
user = User()
p = getPluginInstance(plugin_name, user)
complete_conf = None
# check whether a scheduled id is given
if scheduled_id:
config_dict = loadScheduledConf(plugin_name, scheduled_id, user)
if config_dict is None:
conf_file = user.getUserToolConfig(plugin_name)
if os.path.isfile(conf_file):
log.debug('Loading config file %s', conf_file)
with open(conf_file, 'r') as f:
complete_conf = p.readConfiguration(f)
else:
log.debug('No config file was found in %s', conf_file)
if complete_conf is None:
# at this stage we want to resolve our tokens and perform some kind of sanity check before going further
complete_conf = p.setupConfiguration(config_dict=config_dict, recursion=True)
log.debug('Running %s with %s', plugin_name, complete_conf)
rowid = 0
if scheduled_id:
user.getUserDB().upgradeStatus(scheduled_id,
user.getName(),
History.processStatus.running)
rowid = scheduled_id
elif user:
version_details = getVersion(plugin_name)
rowid = user.getUserDB().storeHistory(p,
complete_conf,
user.getName(),
History.processStatus.running,
version_details=version_details,
caption=caption)
# follow the notes
followHistoryTag(rowid, user.getName(), 'Owner')
try:
# we want that the rowid is visible to the tool
p.rowid = rowid
# In any case we have now a complete setup in complete_conf
result = p._runTool(config_dict=complete_conf,
unique_output=unique_output)
# save results when existing
if result is None:
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.finished_no_output)
else:
# create the preview
preview_path = config.get(config.PREVIEW_PATH, None)
if preview_path:
logging.debug('Converting....')
_preview_create(plugin_name, result)
logging.debug('finished')
# write the created files to the database
logging.debug('Storing results into data base....')
user.getUserDB().storeResults(rowid, result)
logging.debug('finished')
# temporary set all processes to finished
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.finished)
except:
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.broken)
raise
return result | 66c48dcef414a453e9b07ea58330ab08e1bd94d5 | 3,655,922 |
from typing import Optional
import hashlib
def generate_abl_contract_for_lateral_stage(
lateral_stage: LateralProgressionStage,
parent_blinding_xkey: CCoinExtKey,
start_block_num: int,
creditor_control_asset: CreditorAsset,
debtor_control_asset: DebtorAsset,
bitcoin_asset: BitcoinAsset,
first_stage_input_descriptor: Optional[BlindingInputDescriptor] = None
) -> int:
"""
Generate the main contract code and accompanying data,
and store all the info in vertical stage objects
"""
assert start_block_num > 0
lstage = lateral_stage
plan = lstage.plan
lstage_blinding_xkey = safe_derive(
parent_blinding_xkey, STAGE_NEXT_LEVEL_PATH
)
# Need blinding factors and input descriptors ready
# before we can generate the scripts
for vstage in lstage.vertical_stages:
blinding_xkey = safe_derive(
lstage_blinding_xkey, f'{vstage.index_m}h')
blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_FACTOR_PATH)
).digest()
asset_blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_ASSET_FACTOR_PATH)
).digest()
if lstage.level_n == 0 and vstage.index_m == 0:
assert first_stage_input_descriptor is not None
contract_input_descriptor = first_stage_input_descriptor
first_stage_input_descriptor = None
else:
assert first_stage_input_descriptor is None
contract_input_descriptor = BlindingInputDescriptor(
asset=plan.collateral.asset,
amount=plan.collateral.amount,
blinding_factor=Uint256(blinding_factor),
asset_blinding_factor=Uint256(asset_blinding_factor),
)
vstage.blinding_data = VerticalProgressionStageBlindingData(
blinding_xkey, contract_input_descriptor
)
collateral_grab_outs_hash = \
get_hash_of_collateral_forfeiture_checked_outs(
lstage.vertical_stages[-1],
creditor_control_asset, debtor_control_asset, bitcoin_asset)
total_vstages = 0
# Need to process in reverse, because scripts in earlier stages
# depend on scripts in later stages
for vstage in reversed(lstage.vertical_stages):
total_vstages += 1
if vstage.next_lateral_stage:
total_vstages += generate_abl_contract_for_lateral_stage(
vstage.next_lateral_stage,
vstage.blinding_data.blinding_xkey,
start_block_num,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset
)
full_repayment_cod = get_full_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
partial_repayment_cod = get_partial_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
revoc_cod = get_revocation_tx_checked_outs_data(
vstage,
creditor_control_asset,
bitcoin_asset
)
stage_script, checked_outs_hashes = \
generate_script_and_checked_outs_hashes(
vstage,
creditor_control_asset,
debtor_control_asset,
start_block_num,
full_repayment_checked_outs_data=full_repayment_cod,
partial_repayment_checked_outs_data=partial_repayment_cod,
revoc_checked_outs_data=revoc_cod,
hash_of_collateral_grab_outputs_data=collateral_grab_outs_hash,
)
vstage.script_data = VerticalProgressionStageScriptData(
stage_script, checked_outs_hashes
)
return total_vstages | 2c9b47666c3fb5abf78b8a7d007d1258930f1068 | 3,655,923 |
import sqlite3
import os
import time
def create_connection(language):
"""
a function to create sqlite3 connections to db, it retries 100 times if connection returned an error
Args:
language: language
Returns:
sqlite3 connection if success otherwise False
"""
try:
# retries
for i in range(0, 100):
try:
return sqlite3.connect(os.path.join(os.path.dirname(os.path.dirname(__file__)),
_builder(_core_config(), _core_default_config())["api_db_name"]))
except:
time.sleep(0.01)
except:
warn(messages(language, 168))
return False | b1fc3ddf9217ad693246c1498e9c41cc7a7bb386 | 3,655,924 |
def force_norm():
"""perform normalization simulation"""
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, df, nfreq, [0,0,0], [W, W, 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, decay,
pt=meep.Vector3(0,0,0), decay_by=1e-3))
return {'frequency': np.array(meep.get_flux_freqs(flux_inc)), 'area': (W)**2,
'incident': np.asarray(meep.get_fluxes(flux_inc))} | e5c9e6255568e52d0cb30504cd22f610b6f6e5d9 | 3,655,925 |
def search(coordinates):
"""Search for closest known locations to these coordinates
"""
gd = GeocodeData()
return gd.query(coordinates) | c9191a06b085c61b547136166cb43a24789d95cb | 3,655,926 |
from pathlib import Path
def get_all_apis_router(_type: str, root_path: str) -> (Path, Path):
"""Return api files and definition files just put the file on folder swagger."""
swagger_path = Path(root_path)
all_files = list(x.name for x in swagger_path.glob("**/*.yaml"))
schemas_files = [x for x in all_files if "schemas" in x]
api_files = [x for x in all_files if "schemas" not in x and "main" not in x]
return api_files if _type == "api" else schemas_files | eab89c870447e3f1abd72529de37d645de3be612 | 3,655,927 |
def get_cached_patches(dataset_dir=None):
"""
Finds the cached patches (stored as images) from disk and returns their paths as a list of tuples
:param dataset_dir: Path to the dataset folder
:return: List of paths to patches as tuples (path_to_left, path_to_middle, path_to_right)
"""
if dataset_dir is None:
dataset_dir = config.DATASET_DIR
cache_dir = join(dataset_dir, 'cache')
frame_paths = [join(cache_dir, x) for x in listdir(cache_dir)]
frame_paths = [x for x in frame_paths if is_image(x)]
frame_paths.sort()
tuples = []
for i in range(len(frame_paths) // config.MAX_SEQUENCE_LENGTH):
foo = (frame_paths[i * config.MAX_SEQUENCE_LENGTH + ix] for ix in range(config.MAX_SEQUENCE_LENGTH))
tuples.append(list(foo))
return tuples | 7990b592ddc9b93e04b11c4ae65f410c6afc15d7 | 3,655,928 |
import tensorflow as tf
def complex_mse(y_true: tf.Tensor, y_pred: tf.Tensor):
"""
Args:
y_true: The true labels, :math:`V \in \mathbb{C}^{B \\times N}`
y_pred: The true labels, :math:`\\widehat{V} \in \mathbb{C}^{B \\times N}`
Returns:
The complex mean squared error :math:`\\boldsymbol{e} \in \mathbb{R}^B`,
where given example :math:`\\widehat{V}_i \in \mathbb{C}^N`,
we have :math:`e_i = \\frac{\|V_i - \\widehat{V}_i\|^2}{N}`.
"""
real_loss = tf.losses.mse(tf.math.real(y_true), tf.math.real(y_pred))
imag_loss = tf.losses.mse(tf.math.imag(y_true), tf.math.imag(y_pred))
return (real_loss + imag_loss) / 2 | 9dc8699312926b379619e56a29529fe2762d68a9 | 3,655,929 |
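A small, illustrative check of the complex MSE above, assuming a TensorFlow 2 environment where tf.losses.mse is available (the values are hypothetical).
y_true = tf.constant([[1 + 1j, 2 + 0j]], dtype=tf.complex64)
y_pred = tf.constant([[1 + 0j, 2 + 2j]], dtype=tf.complex64)
print(complex_mse(y_true, y_pred)) # real-part MSE is 0, imaginary-part MSE is 2.5, so the result is 1.25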
def expand_not(tweets):
"""
DESCRIPTION:
In informal speech, which is widely used in social media, it is common to use contractions of words
(e.g., don't instead of do not).
This may result in misinterpreting the meaning of a phrase especially in the case of negations.
This function expands these contractions and other similar ones (e.g it's --> it is etc...).
INPUT:
tweets: Series of a set of tweets as a python strings
OUTPUT:
Series of filtered tweets
"""
tweets = tweets.str.replace('n\'t', ' not', case=False)
tweets = tweets.str.replace('i\'m', 'i am', case=False)
tweets = tweets.str.replace('\'re', ' are', case=False)
tweets = tweets.str.replace('it\'s', 'it is', case=False)
tweets = tweets.str.replace('that\'s', 'that is', case=False)
tweets = tweets.str.replace('\'ll', ' will', case=False)
tweets = tweets.str.replace('\'l', ' will', case=False)
tweets = tweets.str.replace('\'ve', ' have', case=False)
tweets = tweets.str.replace('\'d', ' would', case=False)
tweets = tweets.str.replace('he\'s', 'he is', case=False)
tweets = tweets.str.replace('what\'s', 'what is', case=False)
tweets = tweets.str.replace('who\'s', 'who is', case=False)
tweets = tweets.str.replace('\'s', '', case=False)
for punct in ['!', '?', '.']:
regex = "(\\"+punct+"( *)){2,}"
tweets = tweets.str.replace(regex, punct+' <repeat> ', case=False)
return tweets | 66f4ed5c7321fe7bf5ea0d350980394a235d99e6 | 3,655,930 |
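An illustrative run of the contraction-expansion helper, assuming a pandas version where Series.str.replace with case=False performs regex matching (the behaviour the function relies on).
import pandas as pd
tweets = pd.Series(["I don't know, it's great!!!", "we're happy"])
print(expand_not(tweets).tolist()) # contractions expanded and the repeated '!' collapsed into '! <repeat> '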
def parse_filter_kw(filter_kw):
"""
Return a parsed filter keyword and boolean indicating if filter is a hashtag
Args:
:filter_kw: (str) filter keyword
Returns:
:is_hashtag: (bool) True, if 'filter_kw' is hashtag
:parsed_kw: (str) parsed 'filter_kw' (lowercase, without '#', ...)
"""
filter_kw = filter_kw.strip()
is_hashtag = filter_kw.startswith('#')
parsed_kw = parse_string(filter_kw, remove=('#', "'")).lower()
return (is_hashtag, parsed_kw) | 253d7d5f1aaf6ab3838e7fb3ba395a919f29b70e | 3,655,931 |
def get_branch_index(BRANCHES, branch_name):
"""
Get the index of the branch name in the BRANCHES array, so we know which branch to merge into next - the next one in the array.
"""
i = 0
for branch in BRANCHES:
if branch_name == branch:
return i
else:
i = i + 1 | c983bab67b3aa0cd1468c39f19732395c7e376f9 | 3,655,932 |
from bs4 import BeautifulSoup
def prettify_save(soup_objects_list, output_file_name):
"""
Saves the results of get_soup() function to a text file.
Parameters:
-----------
soup_object_list:
list of BeautifulSoup objects to be saved to the text file
output_file_name:
entered as string with quotations and with extension .txt , used to name the output text file
This function can work independently of the rest of the library.
Note:
Unique to Windows, open() needs argument: encoding = 'utf8' for it to work.
"""
prettified_soup = [BeautifulSoup.prettify(k) for k in soup_objects_list]
custom_word_added = [m + 'BREAKHERE' for m in prettified_soup]
one_string = "".join(custom_word_added)
# unique to Windows, open() needs argument: encoding = "utf8"
with open(output_file_name, 'w') as file:
file.write(one_string)
return None | 3de5b7df49837c24e89d2ded286c0098069945fd | 3,655,933 |
def determine_required_bytes_signed_integer(value: int) -> int:
"""
Determines the number of bytes that are required to store value
:param value: a SIGNED integer
:return: 1, 2, 4, or 8
"""
value = ensure_int(value)
if value < 0:
value *= -1
value -= 1
if (value >> 7) == 0:
return 1
if (value >> 15) == 0:
return 2
if (value >> 31) == 0:
return 4
if (value >> 63) == 0:
return 8
raise IntegerLargerThan64BitsException | 231e6f1fc239da5afe7f7600740ace846125e7f5 | 3,655,934 |
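A few illustrative calls for the byte-width helper above (assumes ensure_int and IntegerLargerThan64BitsException are defined elsewhere in the same module, as the entry implies).
print(determine_required_bytes_signed_integer(127)) # -> 1
print(determine_required_bytes_signed_integer(-129)) # -> 2
print(determine_required_bytes_signed_integer(2**31)) # -> 8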
import pandas as pd
from bs4 import BeautifulSoup
def scrape_cvs():
"""Scrape and return CVS data."""
page_headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"}
page = get_resource(CVS_ROOT + CVS_VACCINE_PAGE, page_headers)
soup = BeautifulSoup(page.content, 'html.parser')
modals = [elem for elem in soup.find_all(
class_='modal__box') if elem.get('id').startswith('vaccineinfo')]
state_urls = {}
for modal in modals:
state = modal.get('id').split('-')[-1]
state_urls[state] = CVS_ROOT + \
modal.find(class_='covid-status').get('data-url')
state_dfs = []
state_headers = {
'authority': 'www.cvs.com',
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
'accept': '*/*',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.cvs.com/immunizations/covid-19-vaccine',
'accept-language': 'en-US,en;q=0.9',
'referrerPolicy': 'strict-origin-when-cross-origin',
'mode': 'cors',
'credentials': 'include'
}
for state, url in state_urls.items():
print(url)
state_response = get_resource(url, state_headers)
state_df = cvs_json_to_df(state, state_response.json())
state_dfs.append(state_df)
return pd.concat(state_dfs) | 2f2f59b3477297f475d1749ff2a35c2682361cfd | 3,655,935 |
def _to_original(sequence, result):
""" Cast result into the same type
>>> _to_original([], ())
[]
>>> _to_original((), [])
()
"""
if isinstance(sequence, tuple):
return tuple(result)
if isinstance(sequence, list):
return list(result)
return result | 7b9d8d1d2b119d61b43dde253d8d3c48bd0e45b8 | 3,655,936 |
import os
from rauth import OAuth2Service
def generate_oauth_service():
"""Prepare the OAuth2Service that is used to make requests later."""
return OAuth2Service(
client_id=os.environ.get('UBER_CLIENT_ID'),
client_secret=os.environ.get('UBER_CLIENT_SECRET'),
name=config.get('name'),
authorize_url=config.get('authorize_url'),
access_token_url=config.get('access_token_url'),
base_url=config.get('base_url'),
) | dcffe4f283f45eacf52c5b01fc43c20c294b6d05 | 3,655,937 |
import os
from urllib.parse import urlparse
def is_relative_path(value):
"""Check if the given value is a relative path"""
if urlparse(value).scheme in ('http', 'https', 'file'):
return False
return not os.path.isabs(value) | 2b6865963f1335a17e5270fc9212426aa5d52536 | 3,655,938 |
def get_B_R(Rdot):
"""Get B_R from Q, Qdot"""
return Rdot | 696932b9bf423289bdcf91287b0d789007322852 | 3,655,939 |
def run_coroutine_with_span(span, coro, *args, **kwargs):
"""Wrap the execution of a Tornado coroutine func in a tracing span.
This makes the span available through the get_current_span() function.
:param span: The tracing span to expose.
:param coro: Co-routine to execute in the scope of tracing span.
:param args: Positional args to func, if any.
:param kwargs: Keyword args to func, if any.
"""
with span_in_stack_context(span=span):
return coro(*args, **kwargs) | 95672b0a1ecf7b8b86dff09835fa9b3c10f7fad2 | 3,655,940 |
import numpy as np
def calc_bin_centre(bin_edges):
"""
Calculates the centre of a histogram bin from the bin edges.
"""
return bin_edges[:-1] + np.diff(bin_edges) / 2 | 780a02dc9372670ae53fb4d85e216458e7d83975 | 3,655,941 |
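An illustrative use of the bin-centre helper with numpy.histogram output (the sample data is hypothetical).
counts, edges = np.histogram([1, 2, 2, 3, 4], bins=4)
print(calc_bin_centre(edges)) # -> [1.375 2.125 2.875 3.625]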
import numpy as np
import networkx as nx
def to_matrix(dG, tG, d_mat, t_mat, label_mat, bridges):
"""
Parameters:
tG: target graph
dG: drug graph
d_mat: drug feature matrix
t_mat: target feature matrix
label_mat: label matrix
bridges: known links between drugs and targets
Return:
d_feature, t_feature
"""
drug_feature, target_feature = {},{}
new_label = set()
for d,t,i in label_mat:
if d in dG.nodes and t in tG.nodes:
#d_vector = np.zeros(d_mat[d].shape)
#t_vector = np.zeros(t_mat[t].shape)
#if i == 1:
d_vector = d_mat[d]
t_vector = t_mat[t]
addressed_d = set()
addressed_t = set()
for link in bridges:
if link[0] in dG.nodes and link[1] in tG.nodes:
if nx.has_path(dG, d, link[0]) and nx.has_path(tG, t, link[1]):
if link[0] not in addressed_d:
#print(f'di: {d}, dl: {link[0]}')
max_sim_d = max_sim(d,link[0],dG)
d_vector = sim_vec(d_vector, d_mat[link[0]],max_sim_d)
addressed_d.add(link[0])
elif link[1] not in addressed_t:
#print(f'tj: {t}, tl: {link[1]}')
max_sim_t = max_sim(t,link[1],tG)
t_vector = sim_vec(t_vector, t_mat[link[1]],max_sim_t)
addressed_t.add(link[1])
drug_feature[d] = d_vector
target_feature[t] = t_vector
new_label.add((d,t,i))
return drug_feature, target_feature, new_label | 54a8ad910f78eca383eba90bd5f6bf6088145630 | 3,655,942 |
def ensureList(obj):
""" ensures that object is list """
if isinstance(obj, list):
return obj # returns original list
elif hasattr(obj, '__iter__'): # for python 2.x check if obj is iterable
return list(obj) # converts to list
else:
return [obj] | f845658fda36a583ac54caed1e6493d331c910fa | 3,655,943 |
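A few illustrative calls for ensureList (sample values are hypothetical).
print(ensureList([1, 2])) # -> [1, 2] (unchanged)
print(ensureList((1, 2))) # -> [1, 2] (iterable converted to list)
print(ensureList(5)) # -> [5] (scalar wrapped in a list)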
import torch
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) | 15ced02b61d6e8c526bc60d2a5214f83183946c2 | 3,655,944 |
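A quick illustrative evaluation of the gelu approximation above (output values are approximate).
x = torch.linspace(-2.0, 2.0, steps=5)
print(gelu_impl(x)) # roughly tensor([-0.0455, -0.1588, 0.0000, 0.8412, 1.9546])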
def get_shape(kind='line', x=None, y=None, x0=None, y0=None, x1=None, y1=None, span=0, color='red', dash='solid',
width=1,
fillcolor=None, fill=False, opacity=1, xref='x', yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref = 'paper'
x0 = 0
x1 = 1
else:
x0 = x1 = x
else:
x1 = x0
else:
pass # x1 was given explicitly; nothing to adjust
if y1 is None:
if y0 is None:
if y is None:
yref = 'paper'
y0 = 0
y1 = 1
else:
y0 = y1 = y
else:
y1 = y0
shape = {'x0': x0,
'y0': y0,
'x1': x1,
'y1': y1,
'line': {
'color': normalize(color),
'width': width,
'dash': dash
},
'xref': xref,
'yref': yref
}
if kind == 'line':
shape['type'] = 'line'
elif kind == 'circle':
shape['type'] = 'circle'
elif kind == 'rect':
shape['type'] = 'rect'
else:
raise Exception("Invalid or unkown shape type : {0}".format(kind))
if (fill or fillcolor) and kind != 'line':
fillcolor = color if not fillcolor else fillcolor
fillcolor = to_rgba(normalize(fillcolor), opacity)
shape['fillcolor'] = fillcolor
return shape | b639869eca941d2c91d44549aa751c51e033fe00 | 3,655,945 |
from typing import List
def clean_row(elements: List[Tag]) -> List[Tag]:
"""
Clean MathML row, removing children that should not be considered tokens or child symbols.
One example of cleaning that should take place here is removing 'd' and 'δ' signs that are
used as derivatives, instead of as identifiers.
"""
# Remove whitespace between elements.
elements = [e for e in elements if not (isinstance(e, str) and e.isspace())]
# Remove quantifiers and double bars.
elements = [e for e in elements if e.text not in ["∀", "∃"]]
elements = [e for e in elements if e.text not in ["|", "∥"]]
# Remove 'd's and 'δ's used as signs for derivatives.
derivatives_cleaned = []
DERIVATIVE_GLYPHS = ["d", "δ", "∂"]
for i, e in enumerate(elements):
is_derivative_symbol = (
# Is the glyph a derivative sign?
e.name == "mi"
and e.text in DERIVATIVE_GLYPHS
# Is the next element a symbol?
and (i < len(elements) - 1 and _is_identifier(elements[i + 1]))
# Is the element after that either not a symbol, or another derivative sign?
and (
i == len(elements) - 2
or not _is_identifier(elements[i + 2])
or elements[i + 2].text in DERIVATIVE_GLYPHS
)
)
if not is_derivative_symbol:
derivatives_cleaned.append(e)
elements = derivatives_cleaned
return elements | 527cb06ddb19d9fb25e5805c49f903254813c4e8 | 3,655,946 |
def models(estimators, cv_search, transform_search):
"""
Grid search prediction workflows. Used by bll6_models, test_models, and product_models.
Args:
estimators: collection of steps, each of which constructs an estimator
cv_search: dictionary of arguments to LeadCrossValidate to search over
transform_search: dictionary of arguments to LeadTransform to search over
Returns: a list of drain.model.Predict steps constructed by taking the product of
the estimators with the result of drain.util.dict_product on each of
cv_search and transform_search.
Each Predict step contains the following in its inputs graph:
- lead.model.cv.LeadCrossValidate
- lead.model.transform.LeadTransform
- drain.model.Fit
"""
steps = []
for cv_args, transform_args, estimator in product(
dict_product(cv_search), dict_product(transform_search), estimators):
cv = lead.model.cv.LeadCrossValidate(**cv_args)
cv.name = 'cv'
X_train = Call('__getitem__', inputs=[MapResults([cv], {'X':'obj', 'train':'key',
'test':None, 'aux':None})])
mean = Call('mean', inputs=[X_train])
mean.name = 'mean'
X_impute = Construct(data.impute,
inputs=[MapResults([cv], {'aux':None, 'test':None, 'train':None}),
MapResults([mean], 'value')])
cv_imputed = MapResults([X_impute, cv], ['X', {'X':None}])
cv_imputed.target = True
transform = lead.model.transform.LeadTransform(inputs=[cv_imputed], **transform_args)
transform.name = 'transform'
fit = model.Fit(inputs=[estimator, transform], return_estimator=True)
fit.name = 'fit'
y = model.Predict(inputs=[fit, transform],
return_feature_importances=True)
y.name = 'predict'
y.target = True
steps.append(y)
return steps | 2a3044a9cc994f18e37337a7e58e9fb9e5ef05d1 | 3,655,947 |
from datetime import datetime
import pytz
def xml_timestamp(location='Europe/Prague'):
"""Method creates timestamp including time zone
Args:
location (str): time zone location
Returns:
str: timestamp
"""
return datetime.now(pytz.timezone(location)).isoformat() | a2883e269c8f9ae8ffd723b7b0205d931453e358 | 3,655,948 |
def transform_postorder(comp, func):
"""Traverses `comp` recursively postorder and replaces its constituents.
For each element of `comp` viewed as an expression tree, the transformation
`func` is applied first to building blocks it is parameterized by, then the
element itself. The transformation `func` should act as an identity function
on the kinds of elements (computation building blocks) it does not care to
transform. This corresponds to a post-order traversal of the expression tree,
i.e., parameters are always transformed left-to-right (in the order in which
they are listed in building block constructors), then the parent is visited
and transformed with the already-visited, and possibly transformed arguments
in place.
NOTE: In particular, in `Call(f,x)`, both `f` and `x` are arguments to `Call`.
Therefore, `f` is transformed into `f'`, next `x` into `x'` and finally,
`Call(f',x')` is transformed at the end.
Args:
comp: The computation to traverse and transform bottom-up.
func: The transformation to apply locally to each building block in `comp`.
It is a Python function that accepts a building block at input, and should
return either the same, or transformed building block at output. Both the
input and output of `func` are instances of `ComputationBuildingBlock`.
Returns:
The result of applying `func` to parts of `comp` in a bottom-up fashion.
Raises:
TypeError: If the arguments are of the wrong computation_types.
NotImplementedError: If the argument is a kind of computation building block
that is currently not recognized.
"""
py_typecheck.check_type(comp,
computation_building_blocks.ComputationBuildingBlock)
if isinstance(
comp,
(computation_building_blocks.CompiledComputation,
computation_building_blocks.Data, computation_building_blocks.Intrinsic,
computation_building_blocks.Placement,
computation_building_blocks.Reference)):
return func(comp)
elif isinstance(comp, computation_building_blocks.Selection):
return func(
computation_building_blocks.Selection(
transform_postorder(comp.source, func), comp.name, comp.index))
elif isinstance(comp, computation_building_blocks.Tuple):
return func(
computation_building_blocks.Tuple([(k, transform_postorder(
v, func)) for k, v in anonymous_tuple.to_elements(comp)]))
elif isinstance(comp, computation_building_blocks.Call):
transformed_func = transform_postorder(comp.function, func)
if comp.argument is not None:
transformed_arg = transform_postorder(comp.argument, func)
else:
transformed_arg = None
return func(
computation_building_blocks.Call(transformed_func, transformed_arg))
elif isinstance(comp, computation_building_blocks.Lambda):
transformed_result = transform_postorder(comp.result, func)
return func(
computation_building_blocks.Lambda(
comp.parameter_name, comp.parameter_type, transformed_result))
elif isinstance(comp, computation_building_blocks.Block):
return func(
computation_building_blocks.Block(
[(k, transform_postorder(v, func)) for k, v in comp.locals],
transform_postorder(comp.result, func)))
else:
raise NotImplementedError(
'Unrecognized computation building block: {}'.format(str(comp))) | 964e55dc33acf978cae3f058397c9b355cae9af7 | 3,655,949 |
def bytes_to_unicode_records(byte_string, delimiter, encoding):
""" Convert a byte string to a tuple containing an array of unicode
records and any remainder to be used as a prefix next time. """
string = byte_string.decode(encoding)
records = string.split(delimiter)
return (records[:-1], records[-1].encode(encoding)) | ccc3591551a6b316843cc8eafb33e45627eac752 | 3,655,950 |
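An illustrative example of splitting a partially received byte chunk into complete records plus a remainder to prepend to the next chunk.
records, remainder = bytes_to_unicode_records(b'alpha\nbeta\ngam', '\n', 'utf-8')
print(records) # -> ['alpha', 'beta']
print(remainder) # -> b'gam' (prefix for the next chunk)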
def administrator():
"""Returns a :class:`t_system.administration.Administrator` instance."""
return Administrator() | e473ee2e86f66f96a5cf3e09ac4a052e32a279b9 | 3,655,951 |
import numpy
import cv2
import pytesseract
from PIL import Image
def ocr(path, lang='eng'):
"""Optical Character Recognition function.
Parameters
----------
path : str
Image path.
lang : str, optional
Decoding language. Default english.
Returns
-------
str
The first five characters of the text recognized in the image.
"""
image = Image.open(path)
vectorized_image = numpy.asarray(image).astype(numpy.uint8)
vectorized_image[:, :, 0] = 0
vectorized_image[:, :, 2] = 0
im = cv2.cvtColor(vectorized_image, cv2.COLOR_RGB2GRAY)
return pytesseract.image_to_string(
Image.fromarray(im),
lang=lang
)[:5] | 9b484779a34d65bb25e57baeaa371205c65d2dc6 | 3,655,952 |
import subprocess
def get_rpd_vars():
"""Read RPD variables set by calling and parsing output from init
"""
cmd = get_init_call()
cmd = ' '.join(cmd) + ' && set | grep "^RPD_"'
try:
res = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.fatal("Couldn't call init %s. Result was: %s", cmd, e.output)
raise
rpd_vars = dict()
for line in res.decode().splitlines():
if line.startswith('RPD_') and '=' in line:
#line = line.replace("export ", "")
#line = ''.join([c for c in line if c not in '";\''])
#logger.debug("line = {}".format(line))
k, v = line.split('=')
rpd_vars[k.strip()] = v.strip()
return rpd_vars | c7ac35b81eab03ab71469e42a3e3858c7dbb0f2c | 3,655,953 |
import os
def which(filename):
"""
Emulates the UNIX `which` command in Python.
Raises an IOError if no result is found.
"""
locations = os.environ.get("PATH").split(os.pathsep)
candidates = []
for location in locations:
candidate = os.path.join(location, filename)
if os.path.isfile(candidate) or os.path.islink(candidate):
candidates.append(candidate)
if len(candidates) == 0:
raise IOError("Could not find '{0}' in PATH".format(filename))
return candidates[0] | 33cdbea19e9d9300bd4983efb7d2bc37a2d38fc1 | 3,655,954 |
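An illustrative call of the which() helper; the result depends on the local PATH, and an IOError is raised when nothing matches.
print(which('python3')) # e.g. '/usr/bin/python3' on a typical Linux system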
def _GetAllHypervisorParameters(cluster, instances):
"""Compute the set of all hypervisor parameters.
@type cluster: L{objects.Cluster}
@param cluster: the cluster object
@param instances: list of L{objects.Instance}
@param instances: additional instances from which to obtain parameters
@rtype: list of (origin, hypervisor, parameters)
@return: a list with all parameters found, indicating the hypervisor they
apply to, and the origin (can be "cluster", "os X", or "instance Y")
"""
hvp_data = []
for hv_name in cluster.enabled_hypervisors:
hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
for os_name, os_hvp in cluster.os_hvp.items():
for hv_name, hv_params in os_hvp.items():
if hv_params:
full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
hvp_data.append(("os %s" % os_name, hv_name, full_params))
# TODO: collapse identical parameter values in a single one
for instance in instances:
if instance.hvparams:
hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
cluster.FillHV(instance)))
return hvp_data | 8ad0479fd87bc9ad17d993772046c1a2fd1bb7a5 | 3,655,955 |
import numpy as np
def is_solution(system, point):
"""
Checks whether the point is the solution for a given constraints system.
"""
a = np.array(system)
# get the left part
left = a[:, :-1] * point
left = sum(left.T)
# get the right part
right = (-1) * a[:, -1]
return np.all(left <= right) | 774987f22a57f3a6d68b5d51f7b3a42d945a1eff | 3,655,956 |
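An illustrative check of the constraint test above: each row encodes a·x + b <= 0 with the constant term last (assumes numpy is imported as np, as added above).
system = [[1, 1, -4], [-1, 0, 0]] # x + y <= 4 and x >= 0
print(is_solution(system, [1, 2])) # -> True
print(is_solution(system, [5, 1])) # -> False, violates x + y <= 4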
def git_config_bool(option: str) -> bool:
"""
Return a boolean git config value, defaulting to False.
"""
return git_config(option) == "true" | 1ed48faa3c6de43fc8a732aed2fde1a81bc75949 | 3,655,957 |
def read_configs(paths):
"""
Read yaml files and merged dict.
"""
eths = dict()
vlans = dict()
bonds = dict()
for path in paths:
cfg = read_config(path)
ifaces = cfg.get("network", dict())
if "ethernets" in ifaces:
eths.update(ifaces["ethernets"])
if "vlans" in ifaces:
vlans.update(ifaces["vlans"])
if "bonds" in ifaces:
bonds.update(ifaces["bonds"])
return dict(
ethernets=eths,
vlans=vlans,
bonds=bonds
) | 998c75b9d75e4d6404c265a67c31bb88b9b7d435 | 3,655,958 |
import json
def get_client():
""" generates API client with personalized API key """
with open("api_key.json") as json_file:
apikey_data = json.load(json_file)
api_key = apikey_data['perspective_key']
# Generates API client object dynamically based on service name and version.
perspective = discovery.build('commentanalyzer', 'v1alpha1',
developerKey=api_key)
dlp = discovery.build('dlp', 'v2', developerKey=api_key)
return (apikey_data, perspective, dlp) | be68eeeedf9c3dcf3f3991b70db18cd3032d2218 | 3,655,959 |
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
settings['route_patterns'] = {
'villages': '/geography.cfm',
'parameters': '/thesaurus.cfm',
'sources': '/bibliography.cfm',
'languages': '/languages.cfm',
'florafauna': '/florafauna.cfm',
'bangime': '/bangime.cfm',
'file': r'/_files/{id:[^/\.]+}',
'file_alt': r'/_files/{id:[^/\.]+}.{ext}',
}
config = Configurator(settings=settings)
config.include('clldmpg')
config.register_menu(
('dataset', partial(menu_item, 'dataset', label='Home')),
('languages', partial(menu_item, 'languages')),
('values', partial(menu_item, 'values', label='Lexicon')),
('parameters', partial(menu_item, 'parameters', label='Thesaurus')),
('villages', partial(menu_item, 'villages', label='Villages')),
('florafauna', partial(menu_item, 'florafauna', label='Flora-Fauna')),
#('contributors', partial(menu_item, 'contributors', label='Project members')),
('sources', partial(menu_item, 'sources', label='Materials')),
#('bangime', partial(menu_item, 'bangime', label='Bangime')),
#('other', partial(menu_item, 'other', label='Other Languages')),
('movies', partial(menu_item, 'movies', label='Videos')),
)
home_comp = config.registry.settings['home_comp']
home_comp = [
'bangime', 'other',
'contributors'] + home_comp
config.add_settings({'home_comp': home_comp})
config.register_resource('village', models.Village, IVillage, with_index=True)
config.register_resource('movie', models.Movie, IMovie, with_index=True)
config.register_resource('file', models.File, IFile, with_index=True)
config.registry.registerUtility(CustomFactoryQuery(), ICtxFactoryQuery)
config.add_page('bangime')
config.add_page('florafauna')
config.add_page('other')
config.add_page('typology')
return config.make_wsgi_app() | 52779856e4eeecb9673707b707d51322decda729 | 3,655,960 |
def overrides(pattern, norminput):
"""Split a date subfield into beginning date and ending date. Needed for fields with
multiple hyphens.
Args:
pattern: date pattern
norminput: normalized date string
Returns:
start date portion of pattern
start date portion of norminput
end date portion of pattern
end date portion of norminput
"""
if pattern == 'NNNN-NNNN-':
return pattern[:4], pattern[5:9], norminput[:4], norminput[5:9]
if pattern == 'NNNN?-NNNN? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN---NNNN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--':
return pattern[:4], None, norminput[:4], None
if pattern == 'NNNN-NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'f. NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NNNN?-NNNN av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN-NN-NNNN':
return pattern, None, norminput, None
if pattern == '-NNNN-':
return None, pattern[:-1], None, norminput[:-1]
if pattern == 'NNNN--NNNN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-NN--?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNNNNNN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNNN-NNN-':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN av. j.-c.-NNNN':
return pattern[:-5], pattern[-4:], norminput[:-5], norminput[-4:]
if pattern == 'NNNN-NN-NN-':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NN-- -NNNN':
return pattern[:4], pattern[-4:], norminput[:4], norminput[-4:]
if pattern == 'NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN? av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--...':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'fl. NNN--NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'fl. NN---NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN---NNNN?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNN--NNN-':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN..-NN.. av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN--':
return pattern, None, norminput, None
if pattern == 'fl. NN--':
return pattern, None, norminput, None
if pattern == 'NN..?-NN..? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNN-NNN av. j.-c.':
return pattern[:3], pattern[4:], norminput[:3], norminput[4:]
if pattern == 'NN---NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNN--NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN-..-NN..':
return pattern[:2]+pattern[3:5], pattern[6:], norminput[:2]+norminput[3:5], norminput[6:]
if pattern == 'NN---':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NNNN?-NNNN?':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNNN-NN-NN-NNNN-NN-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN':
return pattern, None, norminput, None
if pattern == 'NNNN-N-N':
return pattern, None, norminput, None
if pattern == 'NNNN-NNNN-NN-NN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-N-NN-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-N-NNNN-N-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN-N-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'month NN NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NN month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-NNNN-N-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == '-NNNN-NN-NN':
return None, pattern[1:], None, norminput[1:]
if pattern == 'NNNN-NN-NN-month NN NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-NNNN-N-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NN month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-NN-NNNN-NN-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'month N NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-month NN NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-month N NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-N month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'N month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-NN-NN-NNNN-NN-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NNNN/NN/NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN}}':
return pattern, None, norminput, None
if pattern == 'NN-NN-NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-month N NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NNNN-N-NN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-N-NN-month NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'c. NNNN-NNNN-NN-NN':
return pattern[:7], pattern[8:], norminput[:7], norminput[8:]
if pattern == 'NNNN-N-N-NNNN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
return None | 5e005b0537d123607225ed82163cac07f578a755 | 3,655,961 |
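A quick illustrative call of the date-splitting helper above (the pattern and date values are hypothetical).
print(overrides('NNNN-NNNN-', '1925-1987-')) # -> ('NNNN', 'NNNN', '1925', '1987')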
def defaultPolynomialLoad():
"""
pytest fixture that returns a default PolynomialStaticLoad object
:return: PolynomialStaticLoad object initialized with default values
"""
return PolynomialStaticLoad() | 75b989dc80e7ccf4e9a091de2dcdeb8758b465b3 | 3,655,962 |
def calc_pi(iteration_count, cores_usage):
"""
We calculate pi using Ulam's Monte Carlo method. See the module
documentation. The calculated value of pi is returned.
We use a process pool to offer the option of spreading the
calculation across more than one core.
iteration_count is the number of iterations that are run.
cores_usage is the number of processes to use.
"""
# We're using a multiprocessing pool here, to take advantage of
# multi-core CPUs.
# Calculate stuff for the pool.
pool_size = cores_usage
iterations_per_process = iteration_count // pool_size
work_list = [iterations_per_process] * pool_size
work_list[0] += iteration_count % pool_size
# Set up the pool.
calc_pool = mp.Pool(pool_size)
# Use the pool to obtain random points in the unit circle.
# We'll let the system determine the chunk size.
in_circle_total = sum(calc_pool.map(
count_is_in_cirle,
work_list))
# Finish the calculation. in_circle_total, divided by the total
# number of iterations, is the area of the unit circle
# relative to the [-1, 1] square. Multiply by 4, which is the area
# of the [-1, 1] square, to get the area of the unit circle.
# .NOTE. If you modify this program to run in Python 2.7, remember
# to modify this calculation to use floating point division (or
# import division from future).
return 4 * in_circle_total / iteration_count | 7b4db8f0936995f46a42fedb4d5539cd3057eb01 | 3,655,963 |
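# The function above maps `count_is_in_cirle` over the pool but does not define
# it. Below is a minimal, hedged sketch of what such a worker could look like;
# the use of `random.uniform` and the exact implementation are assumptions for
# illustration, not the original code.
import random

def count_is_in_cirle(iterations):
    # Count uniformly random points in the [-1, 1] square that fall inside
    # the unit circle (x^2 + y^2 <= 1).
    hits = 0
    for _ in range(iterations):
        x = random.uniform(-1.0, 1.0)
        y = random.uniform(-1.0, 1.0)
        if x * x + y * y <= 1.0:
            hits += 1
    return hits

# Example usage (assuming the sketch above):
#   pi_estimate = calc_pi(1_000_000, cores_usage=4)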
import requests

from django.conf import settings
from django.core.mail import mail_admins
def send_postatus_errors():
"""Looks at postatus file and sends an email with errors"""
# Gah! Don't do this on stage!
if settings.STAGE:
return
def new_section(line):
return (line.startswith('dennis ')
or line.startswith('Totals')
or line.startswith('BUSTED')
or line.startswith('COMPILED'))
# Download the postatus file
postatus = requests.get('https://support.mozilla.org/media/postatus.txt')
# Parse it to see which locales have issues
    lines = postatus.text.splitlines()
datestamp = lines.pop(0)
errordata = []
while lines:
line = lines.pop(0)
if line.startswith('>>> '):
while lines and not new_section(line):
errordata.append(line)
line = lines.pop(0)
# If we have errors to send, send them
if errordata:
mail_admins(
subject='[SUMO] postatus errors %s' % datestamp,
message=(
'These are the errors in the SUMO postatus file.\n' +
'See http://postatus.paas.allizom.org/p/SUMO for details\n' +
'and bug generation links.\n\n' +
'\n'.join(errordata)
)
) | 8eadab8bce8155d18b805ee04668cb90a400659c | 3,655,964 |
import numpy as np

def pair_range_from_to(x):  # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
    Returns a from_level-to_level matrix of half-cycle counts
    x: Peak-Trough sequence (integer list of local minima and maxima)
    This routine is implemented according to
    "Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
    S = np.zeros(n + 1, dtype=int)
A = np.zeros((k + 1, k + 1))
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
#print S[p - 3:p + 1]
#print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
#print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
#print (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or \
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
A[S[p - 2], S[p - 1]] += 1
A[S[p - 1], S[p - 2]] += 1
S[p - 2] = S[p]
p -= 2
else:
break
if f == 1:
break # q==n
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
#print S[q], "to", S[q + 1]
A[S[q], S[q + 1]] += 1
return A | 96d86079b971bda58fd2d0af440feecc8fa4c1fd | 3,655,965 |
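# Hedged usage sketch for pair_range_from_to; the peak-trough sequence below is
# made up for illustration and is not from the original test suite.
example_x = np.array([0, 2, 1, 3, 0, 2])
example_A = pair_range_from_to(example_x)
# example_A[i, j] holds the number of half cycles counted from level i to
# level j, with levels measured relative to the minimum of the input.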
def serialize_action(
action: RetroReaction, molecule_store: MoleculeSerializer
) -> StrDict:
"""
Serialize a retrosynthesis action
:param action: the (re)action to serialize
:param molecule_store: the molecule serialization object
:return: the action as a dictionary
"""
dict_ = action.to_dict()
dict_["mol"] = molecule_store[dict_["mol"]]
dict_["class"] = f"{action.__class__.__module__}.{action.__class__.__name__}"
return dict_ | f35c0a34cc6778a39c991edafdda6bd30aea4886 | 3,655,966 |
from inspect import getmro

def convert(obj, totype, debug=False, **kwargs):
    """Convert object obj to type totype.
The converter is chosen from gna.converters dictionary based on the type(obj) or one of it's base classes.
:obj: object to convert
:totype: the target type
Order:
1. Set type to type(obj).
2. Try to find converter for the current type. Return if found.
3. Try to find 'base' converter for the current type. Convert obj to base and return convert(newobj) if 'base' converter found.
4. Set type to next base type of obj. Repeat from 2.
Example:
convert( N.array([1, 2, 3]), R.vector('double') )
convert( N.array([1, 2, 3]), R.vector, dtype='double' )
"""
def msg( title, converter=None ):
res = title
if converter:
res+= ' '+converter.__name__
typestr = isinstance(totype, str) and totype or totype.__name__
res+=" to convert '{0}' ({1}) to '{2}'".format(
type(obj).__name__,
', '.join([base.__name__ for base in bases]),
typestr
)
if kwargs:
res+=' [kwargs: %s]'%( str( kwargs ) )
return res
bases = getmro(type(obj))
for base in bases:
bconverters = converters.get( base )
if not bconverters:
continue
converter = bconverters.get( totype )
if converter:
break
if 'base' in bconverters:
if debug:
print( 'Convert', type(obj).__name__, 'to base' )
return convert( bconverters['base'](obj), totype, debug, **kwargs )
else:
raise Exception(msg('Can not find converter'))
if debug:
print( msg( 'Using converter', converter ) )
return converter( obj, **kwargs ) | b63d35b193833ef9432cb9752c87257f35cf0210 | 3,655,967 |
import string
def complement(s):
"""
Return complement of 's'.
"""
c = string.translate(s, __complementTranslation)
return c | 7dab43db51bc5a3bb7321deebdb8122792f08d86 | 3,655,968 |
import copy
def get_state_transitions(actions):
"""
get the next state
@param actions:
@return: tuple (current_state, action, nextstate)
"""
state_transition_pairs = []
for action in actions:
current_state = action[0]
id = action[1][0]
next_path = action[1][1]
next_state = copy.deepcopy(current_state)
if 'NoTrans' not in id:
# change the state
next_state[id] = next_path
state_transition_pairs.append((current_state, action[1], next_state))
return state_transition_pairs | bbed37ed6469f5635fbc65fa07195114b4bb3dac | 3,655,969 |
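# Hedged usage sketch for get_state_transitions; the state keys and path values
# below are made up for illustration.
example_actions = [({'door': 'closed'}, ('door', 'open')),
                   ({'door': 'open'}, ('door_NoTrans', 'open'))]
example_transitions = get_state_transitions(example_actions)
# The first action changes the state to {'door': 'open'}; the second contains
# 'NoTrans' in its id, so its next state equals its current state.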
import struct
def parse_pascal_string(characterset, data):
"""
Read a Pascal string from a byte array using the given character set.
:param characterset: Character set to use to decode the string
:param data: binary data
:return: tuple containing string and number of bytes consumed
"""
string_size_format, string_size_size, character_size = get_string_size_format(characterset)
if len(data) < string_size_size:
raise FileParseException("String size truncated")
string_size = struct.unpack("<" + string_size_format, data[0:string_size_size])[0] * character_size
string_data = data[string_size_size:string_size_size + string_size]
result = string_data.decode(CHARACTER_SETS[characterset])
total_size = string_size_size + string_size
return result, total_size | eabdfe1f6fb864eead1345016495f64c5457727e | 3,655,970 |
def folder(initial=None, title='Select Folder'):
"""Request to select an existing folder or to create a new folder.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
Returns
-------
:class:`str`
The name of the selected folder or :obj:`None` if the user cancelled
the request to select a folder.
"""
app, title = _get_app_and_title(title)
name = QtWidgets.QFileDialog.getExistingDirectory(app.activeWindow(), title, initial)
return name if len(name) > 0 else None | 60331e1a89241595e09e746901fff656f8d4365a | 3,655,971 |
from typing import Sequence
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy
def build_clusters(
metadata: pd.DataFrame,
ipm_regions: Sequence[str],
min_capacity: float = None,
max_clusters: int = None,
) -> pd.DataFrame:
"""Build resource clusters."""
if max_clusters is None:
max_clusters = np.inf
if max_clusters < 1:
raise ValueError("Max number of clusters must be greater than zero")
df = metadata
cdf = _get_base_clusters(df, ipm_regions).sort_values("lcoe")
if cdf.empty:
raise ValueError(f"No resources found in {ipm_regions}")
if min_capacity:
# Drop clusters with highest LCOE until min_capacity reached
end = cdf["mw"].cumsum().searchsorted(min_capacity) + 1
if end > len(cdf):
raise ValueError(
f"Capacity in {ipm_regions} ({cdf['mw'].sum()} MW) less than minimum ({min_capacity} MW)"
)
cdf = cdf[:end]
# Track ids of base clusters through aggregation
cdf["ids"] = [[x] for x in cdf["id"]]
# Aggregate clusters within each metro area (metro_id)
while len(cdf) > max_clusters:
# Sort parents by lowest LCOE distance of children
diff = lambda x: abs(x.max() - x.min())
parents = (
cdf.groupby("parent_id", sort=False)
.agg(child_ids=("id", list), n=("id", "count"), lcoe=("lcoe", diff))
.sort_values(["n", "lcoe"], ascending=[False, True])
)
if parents.empty:
break
if parents["n"].iloc[0] == 2:
# Choose parent with lowest LCOE
best = parents.iloc[0]
# Compute parent
parent = pd.Series(
_merge_children(
cdf.loc[best["child_ids"]],
ids=_flat(*cdf.loc[best["child_ids"], "ids"]),
**df.loc[best.name],
)
)
# Add parent
cdf.loc[best.name] = parent
# Drop children
cdf.drop(best["child_ids"], inplace=True)
else:
# Promote child with deepest parent
parent_id = df.loc[parents.index, "cluster_level"].idxmax()
parent = df.loc[parent_id]
child_id = parents.loc[parent_id, "child_ids"][0]
# Update child
columns = ["id", "parent_id", "cluster_level"]
cdf.loc[child_id, columns] = parent[columns]
# Update index
cdf.rename(index={child_id: parent_id}, inplace=True)
# Keep only computed columns
columns = _flat(MEANS, SUMS, "ids")
columns = [col for col in columns if col in cdf.columns]
cdf = cdf[columns]
cdf.reset_index(inplace=True, drop=True)
if len(cdf) > max_clusters:
# Aggregate singleton metro area clusters
Z = scipy.cluster.hierarchy.linkage(cdf[["lcoe"]].values, method="ward")
# TODO: Store mask in temporary table
cdf["_keep"] = True
for child_idx in Z[:, 0:2].astype(int):
cdf.loc[child_idx, "_keep"] = False
parent = _merge_children(
cdf.loc[child_idx], _keep=True, ids=_flat(*cdf.loc[child_idx, "ids"])
)
cdf.loc[len(cdf)] = parent
if not cdf["_keep"].sum() > max_clusters:
break
cdf = cdf[cdf["_keep"]]
return cdf[columns] | 32e527760c8a06799a41f3cfce0ed5ba27df5b8b | 3,655,972 |
from typing import Any
from typing import Optional
from tqdm import tqdm
def tqdm_hook(t: tqdm) -> Any:
    """Progress bar hook for visualising download progress."""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, t_size: Optional[int] = None) -> None:
if t_size is not None:
t.total = t_size
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to | ff075a946ea9cf2d124d9d5e93fb83d31f2e0623 | 3,655,973 |
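# Hedged usage sketch: wiring tqdm_hook into urllib's reporthook. The URL and
# filename are placeholders, not taken from the original project.
import urllib.request

def download_with_progress(url: str, filename: str) -> None:
    # urlretrieve invokes the hook as (block_num, block_size, total_size),
    # which matches the update_to callback returned by tqdm_hook.
    with tqdm(unit="B", unit_scale=True, unit_divisor=1024, desc=filename) as t:
        urllib.request.urlretrieve(url, filename=filename, reporthook=tqdm_hook(t))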
def check_regular_timestamps(
time_series: TimeSeries, time_tolerance_decimals: int = 9, gb_severity_threshold: float = 1.0
):
"""If the TimeSeries uses timestamps, check if they are regular (i.e., they have a constant rate)."""
if (
time_series.timestamps is not None
and len(time_series.timestamps) > 2
and check_regular_series(series=time_series.timestamps, tolerance_decimals=time_tolerance_decimals)
):
timestamps = np.array(time_series.timestamps)
if timestamps.size * timestamps.dtype.itemsize > gb_severity_threshold * 1e9:
severity = Severity.HIGH
else:
severity = Severity.LOW
return InspectorMessage(
severity=severity,
message=(
"TimeSeries appears to have a constant sampling rate. "
f"Consider specifying starting_time={time_series.timestamps[0]} "
f"and rate={time_series.timestamps[1] - time_series.timestamps[0]} instead of timestamps."
),
) | 0c44f2b26a71e76b658180e1817cc3dfbeb375e0 | 3,655,974 |
def test_device_bypass(monkeypatch):
"""Test setting the bypass status of a device."""
_was_called = False
def _call_bypass(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BYPASS", "device_id": [6023], "options": {"toggle": "OFF"}}
_was_called = True
return StubResponse(None, 204)
api = call_cbcloud_api()
patch_cbc_sdk_api(monkeypatch, api, POST=_call_bypass)
api.device_bypass([6023], False)
assert _was_called | 17e2a2f1f7c8ef1a7ef32e8aacb40f8f7fe16c53 | 3,655,975 |
import re
import importlib
def import_config_module( cfg_file ):
""" Returns valid imported config module.
"""
cfg_file = re.sub( r'\.py$', '', cfg_file )
cfg_file = re.sub( r'-', '_', cfg_file )
mod_name = 'config.' + cfg_file
cfg_mod = importlib.import_module( mod_name )
if not hasattr( cfg_mod, 'pre_start_config' ):
raise ImportError( 'Config file must define \'pre_start_config\' method' )
if not hasattr( cfg_mod, 'post_start_config' ):
raise ImportError( 'Config file must define \'post_start_config\' method' )
return cfg_mod | 4cb25a56df0f26f0f3c4917aad2ca4cd40e4797f | 3,655,976 |
import multiprocessing
def process_batches(args, batches):
"""Runs a set of batches, and merges the resulting output files if more
than one batch is included.
"""
nbatches = min(args.nbatches, len(batches))
pool = multiprocessing.Pool(nbatches, init_worker_thread)
try:
batches = pool.imap(run_batch, batches, 1)
if not merge_batch_results(batches):
pool.terminate()
pool.join()
return 1
pool.close()
pool.join()
return 0
except:
pool.terminate()
pool.join()
raise | dbd893773e6a5fed1d68a48c875741e4ce963ae6 | 3,655,977 |
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant
(16 or 24 byte long key) and PKCS#5 padding.
:param key:
The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
ctx = triple_des(key, mode=DES_CBC, IV=iv, padmode=DES_PAD_PKCS5)
return ctx.decrypt(data) | bf6d7efaade2cb7ce2f6abf7cea89a04fdbb3897 | 3,655,978 |
import numpy as np
import pandas as pd
from scipy import stats

def kruskal_chi2_test(data=None, alpha=0.05, precision=4):
    """
    col = targets to compare
    row = data for each target
"""
if type(data) == pd.DataFrame:
data = data.copy().to_numpy()
alldata = np.concatenate(data.copy())
else:
alldata = np.concatenate(data.copy())
k = data.shape[1]
alldata.sort()
tmp_df = pd.DataFrame(({'value': alldata}))
tmp_df['rank'] = tmp_df.index + 1 # rank
value_to_rank = tmp_df.groupby('value').mean().reset_index()
T = []
sample_rank_df = []
for i in range(k):
samp = pd.DataFrame(
{'value': data[:, i][~np.isnan(data[:, i])]})
samp = pd.merge(samp, value_to_rank)
sample_rank_df.append(samp)
T.append(samp['rank'].sum())
n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]
# print(T)
# print(n)
rule_of_five_str = ""
if (np.sum(np.array(n) < 5) > 0):
rule_of_five_str += "!(At least one sample size is less than 5)"
else:
rule_of_five_str += "(All sample size >= 5)"
N = np.sum(n)
t_over_n = 0
for i in range(k):
t_over_n += T[i] ** 2 / n[i]
H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)
p_value = 1 - stats.chi2.cdf(H, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'H': H, 'p-value': p_value,
'T': T, 'sample_rank_df': sample_rank_df}
flag = p_value < alpha
result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======
{rule_of_five_str}
H statistic value (observed) = {H:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict | 3b89e14e7072cbb6375b0c1ead8320c5643aacd1 | 3,655,979 |
def add_new_action(action, object_types, preferred, analyst):
"""
Add a new action to CRITs.
:param action: The action to add to CRITs.
:type action: str
:param object_types: The TLOs this is for.
:type object_types: list
:param preferred: The TLOs this is preferred for.
:type preferred: list
:param analyst: The user adding this action.
:returns: True, False
"""
action = action.strip()
idb_action = Action.objects(name=action).first()
if not idb_action:
idb_action = Action()
idb_action.name = action
idb_action.object_types = object_types
idb_action.preferred = []
prefs = preferred.split('\n')
for pref in prefs:
cols = pref.split(',')
if len(cols) != 3:
continue
epa = EmbeddedPreferredAction()
epa.object_type = cols[0].strip()
epa.object_field = cols[1].strip()
epa.object_value = cols[2].strip()
idb_action.preferred.append(epa)
try:
idb_action.save(username=analyst)
except ValidationError:
return False
return True | 2b54c3766d9793a1c6598402bf7a5b1103bb324b | 3,655,980 |
import multiprocessing
import asyncio
import time as ttime

import pytest
def test_PipeJsonRpcSendAsync_5():
"""
    Special test case.
    Two messages: the first message times out, and the second message is sent before the response
    to the first message is received. Verify that the result returned in response to the
    second message is received. (We discard the result of the message that timed out.)
"""
def method_handler1():
ttime.sleep(0.7)
return 39
def method_handler2():
ttime.sleep(0.2)
return 56
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.start()
async def send_messages():
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
# Submit multiple messages at once. Messages should stay at the event loop
# and be processed one by one.
with pytest.raises(CommTimeoutError):
await p_send.send_msg("method1", timeout=0.5)
result = await p_send.send_msg("method2", timeout=0.5)
assert result == 56, "Incorrect result received"
p_send.stop()
asyncio.run(send_messages())
pc.stop() | a1939e03f4c9992d84ac52a35b975f20077a2161 | 3,655,981 |
from typing import Collection
import os
import json
import pandas as pd
from six import string_types
def RACEDataset(race_type):
"""
Loads a RACE dataset given the type (see the RACEType enum).
Any error during reading will generate an exception.
Returns a Pandas DataFrame with 5 columns:
* 'article': string
* 'question': string
* 'answers': list[string], length = 4
* 'correct': oneof('A', 'B', 'C', D')
* 'id': string
The returned IDs are unique and have this format: `index`-`passage_id`.
Examples: 1-middle1548.txt, 2-middle1548.txt, etc. The `passage_id` is
frequently the name of the file. All the questions related to the same
passage are grouped in the same file in the RACE dataset (convention).
Because in each RACE file there are multiple questions, the counter is
necessary in order to guarantee that IDs are unique (the file name is
not sufficient). We translate the `passage_id` into the `question_id`
using the per-passage-question counter.
"""
assert(isinstance(race_type, RACEType))
download_dataset(Collection.RACE, check_shallow_integrity)
dirpath = type_to_data_directory(race_type)
all_data = []
q_ids = {}
for path in os.listdir(dirpath):
assert(os.path.isfile(os.path.join(dirpath, path)))
with open(os.path.join(dirpath, path), 'rt') as f:
entry = json.load(f)
"""
Each passage is a JSON file. The JSON file contains these fields:
1. article: A string, which is the passage.
2. questions: A string list. Each string is a query. We have two
types of questions. First one is an interrogative
sentence. Another one has a placeholder, which is
represented by _.
3. options: A list of the options list. Each options list contains
4 strings, which are the candidate option.
4. answers: A list contains the golden label of each query.
5. id: Each passage has an id in this dataset. Note: the ids are
not unique in the question set! Questions in the same file
have the same id (the name of the file). This id is more of
a passage id than a question id.
"""
assert(len(entry) == 5)
assert(set(entry.keys()) == {
"article",
"questions",
"options",
"answers",
"id"
})
article = entry["article"]
questions = entry["questions"]
options = entry["options"]
answers = entry["answers"]
q_id = entry["id"]
assert(isinstance(article, string_types))
assert(isinstance(questions, list))
assert(isinstance(options, list))
assert(isinstance(answers, list))
assert(isinstance(q_id, string_types))
assert(len(questions) == len(options))
assert(len(questions) == len(answers))
for question, option, answer in zip(questions, options, answers):
assert(isinstance(question, string_types))
assert(isinstance(option, list) and len(option) == 4)
assert(isinstance(answer, string_types))
assert(answer in ["A", "B", "C", "D"])
all_data.append({
'article': article,
'question': question,
'answers': option,
'correct': answer,
'id': next_question_id(q_ids, q_id)
})
df = pd.DataFrame(all_data)
return df | 84407168b73ccdb954dc441a95117fbadf6a0ed1 | 3,655,982 |
import re
def tpc(fastas, **kw):
"""
Function to generate tpc encoding for protein sequences
:param fastas:
:param kw:
:return:
"""
AA = kw['order'] if kw['order'] is not None else 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA]
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = [name]
tmpCode = [0] * 8000
for j in range(len(sequence) - 3 + 1):
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] = \
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings | b8017356980b266d78d85a867aee97c0d79ec5e5 | 3,655,983 |
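# Hedged usage sketch for tpc; the record below is a made-up example, not from
# the original dataset.
example_fastas = [['P1', 'ACDE-FGHIKLMNPQRSTVWY']]
example_encodings = tpc(example_fastas, order=None)
# Each row is [name] followed by 20 * 20 * 20 = 8000 normalised tripeptide
# frequencies.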
def _uninstall_flocker_centos7():
"""
Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
machine.
"""
return sequence([
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-python-flocker",
]),
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-release",
]),
]) | 0d8f068857cbc25743b644d067fe70efffb644f0 | 3,655,984 |
import logging
import os
import sys
def get_memory_banks_per_run(coreAssignment, cgroups):
"""Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores."""
try:
# read list of available memory banks
allMems = set(cgroups.read_allowed_memory_banks())
result = []
for cores in coreAssignment:
mems = set()
for core in cores:
coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
mems.update(_get_memory_banks_listed_in_dir(coreDir))
allowedMems = sorted(mems.intersection(allMems))
logging.debug(
"Memory banks for cores %s are %s, of which we can use %s.",
cores,
list(mems),
allowedMems,
)
result.append(allowedMems)
assert len(result) == len(coreAssignment)
if any(result) and os.path.isdir("/sys/devices/system/node/"):
return result
else:
# All runs get the empty list of memory regions
# because this system has no NUMA support
return None
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e)) | 607d55889e3c792c24a1976a75b28f24b9adea3d | 3,655,985 |
import requests
def authenticate(username, password):
"""Authenticate with the API and get a token."""
API_AUTH = "https://api2.xlink.cn/v2/user_auth"
auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,
'password': password}
r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
try:
return (r.json()['access_token'], r.json()['user_id'])
except KeyError:
raise(LaurelException('API authentication failed')) | 9675227b5ff4f58d79bafffc0407366a26d638bd | 3,655,986 |
import pandas as pd

def filter_hashtags_users(DATAPATH, th, city):
    """
    Cleans target_hashtags by removing hashtags used by fewer than 2 users or by more than th% of users.
    Replaces hashtags by ht_id, computes an entropy and count for each ht_id, and saves the results
    to counts_entropies.csv and <th>filtered_hashtags.csv under DATAPATH.
    Prints progress to standard output.
    :param DATAPATH: directory containing <city>.target_hashtags; output files are written here
    :param th: hashtags are too popular if more than th% of users share them
    :param city: name of the city whose target_hashtags file is read
    :return: (entropy DataFrame, filtered hashtag DataFrame)
"""
ht = pd.read_csv(DATAPATH + city + ".target_hashtags")
print ("ht.shape", ht.shape)
ht["hashtags"] = ht['hashtags'].astype('category')
ht["ht_id"] = ht["hashtags"].cat.codes
ht.drop('hashtags', axis=1, inplace=True)
#arrmult = []
entarr = []
gp = ht.groupby('ht_id')
# cnt_df = gp.size().reset_index(name='sizes')
# hashtags are too popular if more than th% of users share them
max_df_ht = th * len(ht.uid.unique())
print ("max_df_ht", max_df_ht)
# removing hashtags that are used by less than 2 users and more than th% of users
for htid, group in gp:
user_count = len(group['uid'].value_counts().values)
if user_count > 1 and user_count <= max_df_ht:
e = entropy(group['uid'].value_counts().values)
c = len(group)
entarr.append([htid, e, c])
#arrmult.append(htid)
# save entropies of hashtags for other calculations
entdf = pd.DataFrame(data=entarr, columns=['ht_id', 'entropy', 'counts'])
sortt = entdf.sort_values(by='entropy')
sortt.to_csv(DATAPATH + "counts_entropies.csv", index=False)
# filtered hashtag df
ht2 = ht[ht.ht_id.isin(entdf.ht_id)]
print ("after removing too popular and too rare hts", ht2.shape)
ht2.to_csv(DATAPATH + str(th) + "filtered_hashtags.csv", index=False)
return entdf, ht2 | 60e0b02f9bbdccae32958717fd8608aa1932386e | 3,655,987 |
def cluster_set_state(connection: 'Connection', state: int, query_id=None) -> 'APIResult':
"""
Set cluster state.
:param connection: Connection to use,
:param state: State to set,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
return __cluster_set_state(connection, state, query_id) | 88b05a617e17574961e44d5a88bec1ac4da0be95 | 3,655,988 |
def get_all(data, path):
"""Returns a list with all values in data matching the given JsonPath."""
return [x for x in iterate(data, path)] | 592fee87b3b4be171d4e4a19b013b99551768f75 | 3,655,989 |
def extract_information_from_blomap(oneLetterCodes):
"""
extracts isoelectric point (iep) and
hydrophobicity from blomap for each aminoacid
Parameters
----------
oneLetterCodes : list of Strings/Chars
contains oneLetterCode for each aminoacid
Returns
-------
float, float
iep, hydrophobicity
"""
letter_encodings = []
for x in oneLetterCodes:
letter_encodings.append(extended_blomap[x.upper()])
isoelectric_point = []
hydrophobicity = []
for element in letter_encodings:
isoelectric_point.append([element[7]])
hydrophobicity.append([element[8]])
return isoelectric_point, hydrophobicity | d36e16a0e35d744f1001752c98d09035b3e581c6 | 3,655,990 |
def partitions(n):
"""
Return a sequence of lists
Each element is a list of integers which sum to n -
a partition n.
The elements of each partition are in descending order
and the sequence of partitions is in descending lex order.
>>> list(partitions(4))
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
return partitions_with_max(n, max=n - 1) | 042759c97031baee7c958d59b3a432b52111a696 | 3,655,991 |
def create_request_element(channel_id, file_info, data_id, annotation):
"""
create dataset item from datalake file
:param channel_id:
:param file_id:
:param file_info:
:param label_metadata_key:
:return:
"""
data_uri = 'datalake://{}/{}'.format(channel_id, file_info.file_id)
data = {
'source_data': [
{
'data_uri': data_uri,
'data_type': file_info.content_type
}
],
'attributes': {
'classification': annotation,
'id': data_id
}
}
return data | 9fad37428e2608d47b2d0d57d075c0fbd9292b46 | 3,655,992 |
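# Hedged usage sketch; the ids and content type below are placeholders, and a
# SimpleNamespace stands in for whatever file-info object the caller provides.
from types import SimpleNamespace

example_file_info = SimpleNamespace(file_id='20180101T000000-0000', content_type='image/jpeg')
example_item = create_request_element('1234567890123', example_file_info, data_id=1, annotation='cat')
# example_item['source_data'][0]['data_uri'] == 'datalake://1234567890123/20180101T000000-0000'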
from typing import Mapping
from typing import Iterable
def _categorise(obj, _regex_adapter=RegexAdapter):
"""
Check type of the object
"""
if obj is Absent:
return Category.ABSENT
obj_t = type(obj)
if issubclass(obj_t, NATIVE_TYPES):
return Category.VALUE
elif callable(obj):
return Category.CALLABLE
elif _regex_adapter.check(obj):
return Category.REGEX
elif issubclass(obj_t, Mapping):
return Category.DICT
elif issubclass(obj_t, Iterable):
return Category.ITERABLE
else: # catch-all for types like decimal.Decimal, uuid.UUID, et cetera
return Category.VALUE | 549f21bee43f619fea7c2a09940cda1ce03e4e8c | 3,655,993 |
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r[key]
del r[key]
return v, r | 5695b18675b52f4ca8bc3cba1ed0104425e7a04f | 3,655,994 |
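# Usage example for remove_key (values chosen for illustration):
example_v, example_r = remove_key({'a': 1, 'b': 2}, 'a')
# example_v == 1 and example_r == {'b': 2}; the input dictionary is unmodified.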
import csv
import six
def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util,
output_file_param_util):
"""Parses task parameters from a TSV.
Args:
      tasks: Dict containing the path to a TSV file and the task numbers to run.
        The first line of the TSV specifies variables, input, and output
        parameters as column headings. Subsequent lines specify parameter
        values, one row per task.
retries: Number of retries allowed.
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
task_descriptors: an array of records, each containing the task-id,
task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of
parameters for each task of the job.
Raises:
ValueError: If no job records were provided
"""
task_descriptors = []
path = tasks['path']
task_min = tasks.get('min')
task_max = tasks.get('max')
# Load the file and set up a Reader that tokenizes the fields
param_file = dsub_util.load_file(path)
reader = csv.reader(param_file, delimiter='\t')
# Read the first line and extract the parameters
header = six.advance_iterator(reader)
job_params = parse_tasks_file_header(header, input_file_param_util,
output_file_param_util)
# Build a list of records from the parsed input file
for row in reader:
# Tasks are numbered starting at 1 and since the first line of the TSV
# file is a header, the first task appears on line 2.
task_id = reader.line_num - 1
if task_min and task_id < task_min:
continue
if task_max and task_id > task_max:
continue
if len(row) != len(job_params):
dsub_util.print_error('Unexpected number of fields %s vs %s: line %s' %
(len(row), len(job_params), reader.line_num))
# Each row can contain "envs", "inputs", "outputs"
envs = set()
inputs = set()
outputs = set()
labels = set()
for i in range(0, len(job_params)):
param = job_params[i]
name = param.name
if isinstance(param, job_model.EnvParam):
envs.add(job_model.EnvParam(name, row[i]))
elif isinstance(param, job_model.LabelParam):
labels.add(job_model.LabelParam(name, row[i]))
elif isinstance(param, job_model.InputFileParam):
inputs.add(
input_file_param_util.make_param(name, row[i], param.recursive))
elif isinstance(param, job_model.OutputFileParam):
outputs.add(
output_file_param_util.make_param(name, row[i], param.recursive))
task_descriptors.append(
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': 1 if retries else None
}, {
'labels': labels,
'envs': envs,
'inputs': inputs,
'outputs': outputs
}, job_model.Resources()))
# Ensure that there are jobs to execute (and not just a header)
if not task_descriptors:
raise ValueError('No tasks added from %s' % path)
return task_descriptors | 7c195e8c09b439d39fca105fa3303f74c43538c1 | 3,655,995 |
import os
import torch
def load_model(filename, folder=None):
"""
Load a model from a file.
:param filename: name of the file to load the model from
:param folder: name of the subdirectory folder. If given, the model will be loaded from the subdirectory.
:return: model from the file
"""
if folder is not None:
path = os.path.join("./models", folder, filename)
else:
path = os.path.join("./models", filename)
model = torch.load(path, map_location='cuda:0')
return model | b4319796de4b05bf83d657c29d31124016dd9070 | 3,655,996 |
def spreadplayers(self: Client, x: RelativeFloat, y: RelativeFloat,
spread_distance: float, max_range: float,
victim: str) -> str:
"""Spreads players."""
return self.run('spreadplayers', x, y, spread_distance, max_range, victim) | 6577d7209d19a142ae9e02804b84af921df3224c | 3,655,997 |
def get_version():
"""Returns single integer number with the serialization version"""
return 2 | f25ad858441fcbb3b5353202a53f6ebaa8874e4d | 3,655,998 |
from functools import wraps

def format_result(func):
    """Wrap the function's result in a standard response format for the caller."""
@wraps(func)
def wrapper(*args, **kwargs):
ret = {}
try:
data = func(*args, **kwargs)
if type(data) is Response:
return data
ret['data'] = data
ret['success'] = True
ret['message'] = 'Succeed'
except Exception as e:
ret['message'] = str(e)
ret['data'] = None
ret['success'] = False
logger.info(f"request_{func}, result: {ret}")
return ret
return wrapper | 53109217a9fe6fbc00250a7b8dfd6b295e47e12b | 3,655,999 |