content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---|
def get_depth(da, errors="raise"):
"""Get or compute the depth coordinate
If a depth variable cannot be found, it tries to compute it either
from sigma-like coordinates or from layer thicknesses.
Parameters
----------
{errors}
Returns
-------
xarray.DataArray or None
See also
--------
get_lon
get_lat
get_time
get_altitude
get_level
get_vertical
xoa.cf.CFSpecs.search_coord
xoa.sigma.decode_cf_sigma
xoa.grid.decode_cf_dz2depth
"""
cfspecs = xcf.get_cf_specs(da)
errors = misc.ERRORS[errors]
ztype = cfspecs["vertical"]["type"]
# From variable
depth = cfspecs.search(da, 'depth', errors="ignore")
if depth is not None:
return depth
if ztype == "z" or not hasattr(da, "data_vars"): # explicitly
msg = "No depth coordinate found"
if errors == "raise":
raise XoaError(msg)
xoa_warn(msg)
return
# Decode the dataset
if ztype == "sigma" or ztype is None:
err = "ignore" if ztype is None else errors
from .sigma import decode_cf_sigma
da = decode_cf_sigma(da, errors=err)
if "depth" in da:
return da.depth
if ztype == "dz2depth" or ztype is None:
err = "ignore" if ztype is None else errors
from .grid import decode_cf_dz2depth
da = decode_cf_dz2depth(da, errors=err)
if "depth" in da:
return da.depth
msg = "Can't infer depth coordinate from dataset"
if errors == "raise":
raise XoaError(msg)
xoa_warn(msg) | 5,356,100 |
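A minimal usage sketch for the function above (assumes xoa is installed and `ds` is an xarray Dataset whose vertical information is CF-decodable; the file name is hypothetical):
import xarray as xr
ds = xr.open_dataset("ocean_model_output.nc")  # hypothetical file with sigma coordinates or layer thicknesses
depth = get_depth(ds)  # returns the depth DataArray, decoding sigma/dz2depth if no depth variable is present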
def route53_scan(assets, record_value, record):
"""
Scan Route53
"""
for i, asset in enumerate(assets):
asset_type = asset.get_type()
if asset_type == 'EC2' and record_value in (asset.public_ip, asset.private_ip):
assets[i].dns_record = record['Name'].replace('\\052', '*')
elif asset_type == 'ELBV2' and record_value == f'{asset.name}.':
assets[i].dns_record = record['Name'].replace('\\052', '*')
return assets | 5,356,101 |
def process_language(text):
"""
Fetch from language processing API (cloud function)
:param text:
:return:
"""
# The language processing seems to fail without ASCII decoding, i.e. removing emoji and Chinese characters
request = {
"text": text.encode("ascii", errors="ignore").decode()
}
response = requests.post(LANGUAGE_PROCESSOR_API, json=request)
if response.status_code == 500:
print(f"Language processing error {response}")
return {}
else:
return response.json() | 5,356,102 |
def main() -> None:
"""
Plot the total burn and creep together with the creep categories for a certain sprint.
"""
root_path = Path(__file__).parents[1].resolve()
charts_dir = root_path.joinpath("charts")
sheet_dir = root_path.joinpath("data")
sprint_tasks_path = sheet_dir.joinpath("sprint_tasks.xlsx")
burndown_path = sheet_dir.joinpath("burndown.xlsx")
sprint_tasks = SprintTasks(
sprint_tasks_path=sprint_tasks_path, burndown_path=burndown_path
)
parser = argparse.ArgumentParser(description="Plot metrics specific for a sprint.")
parser.add_argument("-r", "--release", type=str, help="Release number")
parser.add_argument("-s", "--sprint_number", type=str, help="Sprint number")
parser.add_argument(
"-d",
"--days_off",
nargs="+",
type=str,
help="Days without development, in the form yyyy-mm-dd (weekends are inferred)",
)
args = parser.parse_args()
sprint_name = f"{args.release}-{args.sprint_number}"
total_sprint_burn_dfs = sprint_tasks.get_total_sprint_creep_and_burn()
burn_categories_df = sprint_tasks.get_burn_categories()
creep_categories_df = sprint_tasks.get_creep_categories()
burn_df = total_sprint_burn_dfs[sprint_name]
days_off = (
[pd.to_datetime(date) for date in args.days_off]
if args.days_off is not None
else None
)
sprint_dates = SprintDates(
sprint_tasks.burndown_sheets[sprint_name].index[0],
len(sprint_tasks.burndown_sheets[sprint_name].index),
days_off,
)
plot_sprint_burn_and_creep(burn_df, sprint_dates, charts_dir, sprint_name)
sprint_categories_df = burn_categories_df.loc[
burn_categories_df.index == sprint_name, :
].T
plot_sprint_categories(sprint_categories_df, charts_dir, sprint_name)
sprint_creep_df = creep_categories_df.loc[
creep_categories_df.index == sprint_name, :
].T
plot_sprint_creep_categories(sprint_creep_df, charts_dir, sprint_name) | 5,356,103 |
def to_int(text):
"""Convert text to an integer, returning '' if the conversion fails."""
try:
return int(text)
except ValueError:
return '' | 5,356,104 |
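A quick usage sketch of the helper above (no assumptions beyond the function itself):
assert to_int("42") == 42
assert to_int("4.2") == ''   # int() rejects non-integer strings, so the fallback '' is returned
assert to_int("n/a") == ''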
def random_sample_datamap(datamap, size):
"""
returns a generator that samples from a datamap along the first dimension
"""
total_size = len(next(iter(datamap.values())))  # dict.values() is not indexable in Python 3
for v in datamap.values():
assert len(v) == total_size
while True:
res = {k: [] for k in datamap}
for _ in range(size):
# TODO allow seeding rng
idx = np.random.randint(total_size)
for k, v in datamap.items():
res[k].append(v[idx])
# TODO should this always cast to an array
res = {k: np.array(v) for k, v in res.items()}
yield res | 5,356,105 |
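A small usage sketch of the sampler above (assumes NumPy and the Python 3-safe indexing noted in the function):
import numpy as np
datamap = {"x": np.arange(10), "y": 2 * np.arange(10)}
sampler = random_sample_datamap(datamap, size=4)
batch = next(sampler)
assert batch["x"].shape == (4,) and np.all(batch["y"] == 2 * batch["x"])  # rows stay aligned across keys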
def get_speakerproposal_picture_upload_path():
"""
Must exist because it is mentioned in old migrations.
Can be removed when we clean up old migrations at some point
"""
pass | 5,356,106 |
def render_items(s_items: dict, all_items: Dict[str, Dict[str, str]], ostream: StringIO):
"""
args:
s_items (dict): the dictionary of capabilities belonging to the sample.
all_items (dict): the dictionary of all the reference capabilities (e.g. ATT&CK or MBC).
ostream (StringIO): the output stream to write the results to.
example::
key::root_val_1::child_val_1 key::root_val_1::child_val_2 [...] key::root_val_2::child_val_1 key::root_val_2::child_val_2 [...]
1 0 [...] 1 1 [...]
"""
for key, values in all_items.items():
s_values = s_items[key]
for id, _ in values.items():
search = "%s::%s" % (key, id)
if search in VERDICTS["malicious"] or search in VERDICTS["suspicious"]:
if s_values is None:
ostream.write(str(0))
else:
found = False
for (_, _, s_id) in s_values:
if id in s_id:
found = True
break
ostream.write(str(1)) if found else ostream.write(str(0))
ostream.write("\t") | 5,356,107 |
def parse_commonsense_reasoning_test(test_data_name):
"""Read JSON test data."""
with tf.gfile.Open(os.path.join(
FLAGS.data_dir, 'commonsense_test',
'{}.json'.format(test_data_name)), 'r') as f:
data = json.load(f)
question_ids = [d['question_id'] for d in data]
sentences = [tokenize(d['substitution']) for d in data]
labels = [d['correctness'] for d in data]
return question_ids, sentences, labels | 5,356,108 |
def flooding(loss, b):
"""Flooding loss: keeps the training loss around the flood level b by reflecting values below b upwards.
"""
return (loss - b).abs() + b | 5,356,109 |
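A worked example of the flooding loss above (assumes PyTorch; scalar tensors for clarity):
import torch
print(flooding(torch.tensor(0.02), b=0.1))  # tensor(0.1800): a loss below the flood level b is reflected upwards
print(flooding(torch.tensor(0.30), b=0.1))  # tensor(0.3000): losses above b pass through unchanged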
def CreateConditions(p,avec,bvec,indexgenerator=CreateLyndonIndices):
"""This creates the set of equations using by default the Lyndon Basis elements.
Parameters
----------
p : the considered order
avec: The set of symbols to use for the first operator.
bvec: The set of symbols to use for the second operator.
indexgenerator: (optional) by default CreateLyndonIndices is used to generate the Lyndon indices. Using CreateMuVectors,
the indices from the overcomplete Hall basis can be used instead.
Returns
-------
array : An array of Equations that have to be satisfied to fulfill the requested order p.
"""
cvec=[*accumulate(avec)]
cvec[-1]=1
retval = [Eq(sum(avec),1)]
for k in range(1,p+1):
vecs=indexgenerator(p,k)
for mu in vecs:
retval.append(Eq(CreateEquation(mu,bvec,cvec),0))
return retval | 5,356,110 |
def with_conf_blddir(conf, name, body, func):
"""'Context manager' to execute a series of tasks into code-specific build
directory.
func must be a callable taking no arguments
"""
old_root, new_root = create_conf_blddir(conf, name, body)
try:
conf.bld_root = new_root
conf.bld_root.ctx.bldnode = new_root
return func()
finally:
conf.bld_root = old_root
conf.bld_root.ctx.bldnode = old_root | 5,356,111 |
def many_to_one(clsname, **kw):
"""Use an event to build a many-to-one relationship on a class.
This makes use of the :meth:`.References._reference_table` method
to generate a full foreign key relationship to the remote table.
"""
@declared_attr
def m2o(cls):
cls._references((cls.__name__, clsname))
return relationship(clsname, **kw)
return m2o | 5,356,112 |
def fullImport(arg):
""" Import only for design evaluation """
global evalDesignInfo
if not arg.onlyTable:
sys.path.append(os.path.join(os.getenv('CODE'),'sbml2doe'))
from stress_test_scripts.evaluateSBCDesign import evalDesignInfo | 5,356,113 |
def _get_dictionary_paths(d, main_list, tmp_list):
"""
Private method that fills the given main_list with lists containing all the key-paths of a dictionary.
For example: the key-paths of the dictionary {a: {b: {c: 1, d: 2}, e: 3}} are a,b,c; a,b,d; a,e
:param d: the dictionary to walk for all the depth paths
:param main_list: the list that collects all the lists containing the paths of the dict
:param tmp_list: a temporary list for inserting the actual path
"""
for elem in d:
if isinstance(d[elem], dict):
tmp_list.append(elem)
_get_dictionary_paths(d[elem], main_list=main_list, tmp_list=tmp_list)
tmp_list.pop()
else:
tmp_list.append(elem)
main_list.append(deepcopy(tmp_list))
tmp_list.pop() | 5,356,114 |
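A usage sketch reproducing the docstring example above (assumes `deepcopy` is imported at module level, as the helper requires):
from copy import deepcopy
paths = []
_get_dictionary_paths({"a": {"b": {"c": 1, "d": 2}, "e": 3}}, main_list=paths, tmp_list=[])
print(paths)  # [['a', 'b', 'c'], ['a', 'b', 'd'], ['a', 'e']]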
def get_workflow_by_id(obj, pretty_print, beep,
id,
headers):
"""Returns a workflow specified by id.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
result = obj.get_workflow_by_id(
id=id,
headers=headers)
stop_spinner(spinner)
opprint(result, indent=pretty_print)
except Exception as e:
stop_spinner(spinner)
tbprint()
eprint('Error:', e)
click.Context.exit(-1) | 5,356,115 |
def get_cnx(dbname=None, write=False):
"""Return a new connection to the database by the given name.
If 'dbname' is None, return a connection to the system database.
If the database file does not exist, it will be created.
The OS-level file permissions are set in DbSaver.
"""
if dbname is None:
dbname = constants.SYSTEM
dbpath = get_dbpath(dbname)
if write:
cnx = sqlite3.connect(dbpath)
else:
path = f"file:{dbpath}?mode=ro"
cnx = sqlite3.connect(path, uri=True)  # use the read-only URI built above, not the bare path
cnx.row_factory = sqlite3.Row
return cnx | 5,356,116 |
def nir_mean(msarr,nir_band=7):
"""
Calculate the mean of the (unmasked) values of the NIR (near infrared) band
of an image array. The default `nir_band` value of 7 selects the NIR2 band
in WorldView-2 imagery. If you're working with a different type of imagery,
you will need to figure out the appropriate value to use instead.
Parameters
----------
msarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need to figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
float
The mean radiance in the NIR band.
"""
return msarr[...,nir_band].mean() | 5,356,117 |
def decode(invoice) -> LightningInvoice:
"""
@invoice: is a str, bolt11.
"""
client = CreateLightningClient()
try:
decode_response = client.call("decode", invoice)
assert decode_response.get("error") is None
result = decode_response["result"]
assert result["valid"], "decode is invalid"
invoice = LightningInvoice()
invoice.msatoshi = result["msatoshi"]
invoice.description: str = result["description"]
return invoice
finally:
client.close() | 5,356,118 |
def main(args):
"""Scan through PDF and split PDF and images."""
filename = args[0]
split_path = args[1]
qr_prefix = args[2]
qr_suffix = args[3]
try:
os.chdir(split_path)
pdfPages = PdfFileReader(filename)
pdf_writer = PdfFileWriter()
i = cover_index = id_index = 0
page_count = 1
prev_file = ''
data = []
output = {}
for page_number in range(pdfPages.numPages):
# convert pdf to series of images for scanning
page = convert_from_bytes(
open(filename, 'rb').read(),
first_page=page_number+1, last_page=page_number+2)[0]
# increase contrast of image for better QR decoding
cv_img = numpy.array(page)
mask = cv2.inRange(cv_img, (0, 0, 0), (200, 200, 200))
inverted = 255 - cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# decode img - only look for QR codes
val = pyzbar.decode(inverted, symbols=[ZBarSymbol.QRCODE])
if val != []:
# found a new qr code, split here
# convert byte literal to string
data = val[0][0].decode("utf-8")
if data == "none": # blank exam with 'none' qr code
data = "BLANK EXAM"
else:
pre = data[0:len(qr_prefix)]
suf = data[(len(data)-len(qr_suffix)):len(data)]
if qr_prefix != '' and pre == qr_prefix:
data = data[len(qr_prefix):]
if qr_suffix != '' and suf == qr_suffix:
data = data[:-len(qr_suffix)]
cover_index = i
cover_filename = '{}_{}_cover.pdf'.format(filename[:-4], i)
output_filename = '{}_{}.pdf'.format(filename[:-4], cover_index)
output[output_filename] = {}
output[output_filename]['id'] = data
# save pdf
if i != 0 and prev_file != '':
output[prev_file]['page_count'] = page_count
with open(prev_file, 'wb') as out:
pdf_writer.write(out)
page.save('{}.jpg'.format(prev_file[:-4]), "JPEG", quality=100)
if id_index == 1:
# correct first pdf's page count and print file
output[prev_file]['page_count'] = page_count
with open(prev_file, 'wb') as out:
pdf_writer.write(out)
page.save('{}.jpg'.format(prev_file[:-4]), "JPEG", quality=100)
# start a new pdf and grab the cover
cover_writer = PdfFileWriter()
pdf_writer = PdfFileWriter()
cover_writer.addPage(pdfPages.getPage(i))
pdf_writer.addPage(pdfPages.getPage(i))
# save cover
with open(cover_filename, 'wb') as out:
cover_writer.write(out)
# save cover image
page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)
id_index += 1
page_count = 1
prev_file = output_filename
else:
# add pages to current split_pdf
page_count += 1
pdf_writer.addPage(pdfPages.getPage(i))
i += 1
# save whatever is left
output_filename = '{}_{}.pdf'.format(filename[:-4], cover_index)
output[output_filename]['id'] = data
output[output_filename]['page_count'] = page_count
with open(output_filename, 'wb') as out:
pdf_writer.write(out)
if not os.path.exists('decoded.json'):
# write json to file for parsing page counts and decoded ids later
with open('decoded.json', 'w') as out:
json.dump(output, out, sort_keys=True, indent=4)
else:
with open('decoded.json') as file:
prev_data = json.load(file)
prev_data.update(output)
with open('decoded.json', 'w') as out:
json.dump(prev_data, out)
# remove original, unsplit file
os.remove(filename)
except Exception:
print("\nbulk_qr_split.py: Failed when splitting pdf " + str(filename))
traceback.print_exc()
sys.exit(1) | 5,356,119 |
def get_wharton_sessionid(public=False):
""" Try to get a GSR session id. """
sessionid = request.args.get("sessionid")
cache_key = "studyspaces:gsr:sessionid"
if sessionid:
return sessionid
if public:
if db.exists(cache_key):
return db.get(cache_key).decode("utf8")
return os.environ.get("GSR_SESSIONID")
return None | 5,356,120 |
def timing ( name = '' , logger = None ) :
"""Simple context manager to measure the clock counts
>>> with timing () :
... whatever action is here
at the exit it prints the clock counts
>>> with timing () as c :
... whatever action is here
at the exit it prints the clock counts
>>> print(c.delta)
"""
return Timer ( name , logger ) | 5,356,121 |
def plotErrorEllipse(x, y, prob=[.68, .95, .997], **kwargs):
"""Compute and plot error ellipses around the mean of a 2d distribution.
Given two arrays, ``x`` and ``y`` where the values of each are drawn
from a Gaussian distribution, but the values of ``y`` are correlated with
the values of ``x``, compute and plot both the mean of the distribution
and the uncertainty ellipses around that mean.
Inputs:
-------------
x, y
(float) Input values. Require that ``len(x) == len(y)``
Optional Inputs:
-----------------
prob
(list or array) List of probability contours to draw. The defaults
are 68%, 95% and 99.7%. Each value must be in the range (0,1)
marker
(string) Marker type to plot the location of the mean. Default: 'o'
mfc
(string) Colour of above marker. Default is 'r', can be any legal
matplotlib colour specification.
ms
(int) Marker size. Default 12
Any other optional inputs are passed to matplotlib.patches.Ellipse
Returns:
----------
**None**
Notes:
---------
Adapted from `www.visiondummy.com <http://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/#Source_Code>`_
The general idea is as follows:
* Compute the covariance matrix for x and y.
* Compute the eigen vectors of the covariance matrix, corresponding
to vectors parallel to the major and minor axes.
* The eigen values correspond to the variances of the distribution
along the major and minor axes.
* To compute the lengths of the major and minor axes for a given confidence
interval, compute the chi^2 value that corresponds to that confidence
interval. The length of the semi major axis is then
:math:`\sqrt{ \chi^2 \sigma^2}`, where :math:`\sigma^2` is
the variance.
The above procedure will give you confidence intervals for the
distribution (e.g. the p=.95 ellipse will contain 95% of the points in the
distribution). To get the confidence interval on the mean, divide the
covariance matrix in the first step by :math:`\sqrt{N}`, where N is the
number of input points.
"""
assert(len(x) == len(y))
prob = np.array(prob)
assert(len(prob) >= 1)
assert(np.all(prob > 0) & np.all(prob < 1))
if len(x) < 2:
raise ValueError("Need at least two points to generate centroid plots")
# Default values for mp.plot
marker = kwargs.pop('marker', 'o')
mfc = kwargs.pop('mfc', 'r')
ms = kwargs.pop('ms', 12)
# Plot the mean position
muX = np.mean(x)
muY = np.mean(y)
mp.plot(muX, muY, marker=marker, mfc=mfc, ms=ms)
# Default values for ellipse
if 'alpha' not in kwargs:
kwargs['alpha'] = .2
if 'color' not in kwargs:
kwargs['color'] = 'r'
# Dividing by root N gives the covariance matrix for the location of
# the mean.
cov = np.cov(x, y) / np.sqrt(len(x))
eigenVals, eigenVecs = np.linalg.eig(cov)
v1 = eigenVecs[:, 0]
# Matplotlib's Ellipse takes an angle not a pair of vectors.
angle_deg = np.degrees(np.arctan2(v1[1], v1[0]))
ax = mp.gca()
for p in prob:
scale = inverseChiSquare(1-p) # Convert prob to chisq
sma = np.sqrt(scale * eigenVals[0])
smi = np.sqrt(scale * eigenVals[1])
ell = Ellipse(xy=[muX,muY], width=2 * sma, height=2 * smi,
angle=angle_deg, **kwargs)
ax.add_patch(ell) | 5,356,122 |
def abs(x):
"""
complex-step safe version of numpy.abs function.
Parameters
----------
x : ndarray
array value to be computed on
Returns
-------
ndarray
"""
if isinstance(x, np.ndarray):
return x * np.sign(x)
elif x.real < 0.0:
return -x
return x | 5,356,123 |
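A short demonstration of the complex-step-safe abs above (assumes NumPy; note that it shadows the builtin abs in this namespace):
import numpy as np
print(abs(np.array([-3.0, 0.5])))  # [3.  0.5]
print(abs(-2.0 + 1e-30j))          # (2-1e-30j): the tiny imaginary perturbation is preserved (with flipped sign)
print(abs(4.0 + 1e-30j))           # (4+1e-30j)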
def downgrade():
"""Downgrade database."""
op.drop_column("accounts_user", "preferences")
op.drop_column("accounts_user", "profile")
op.drop_constraint(
op.f("uq_accounts_user_username"), "accounts_user", type_="unique"
)
op.drop_column("accounts_user", "displayname")
op.drop_column("accounts_user", "username") | 5,356,124 |
async def add_comm_post(request):
# return json.dumps(current_id, title, link, proc_id)
"""current_id is the id of the comment thread (branch)."""
# ip = request.environ.get('REMOTE_ADDR')
data = await request.post(); ip = None
print('data->', data)
#get ip address client
peername = request.transport.get_extra_info('peername'); host=None
if peername is not None:
host, port = peername
ip = host
# print ('host, port->', host, port)
user = get_current_user(request, True)
if check_ban(request, host, user):
return response_json(request, {"result":"fail", "error":"Your IP or account is banned on this site, please contact the administration"})
else: title = data.get('title')
if not user_has_permission(request, 'des:obj', 'add_com') and not user_has_permission(request, 'des:obj', 'add_com_pre'):
return response_json(request, {"result":"fail", "error":"no comment"})
if not check_user_rate(request, user):
return response_json(request, {"result":"fail", "error":"You cannot post messages this often because of negative karma"})
doc_id = data.get('comm_id')
id = data.get('id')
if user_is_logged_in(request): title = get_current_user(request)
# tle = get_doc(request, doc_id )
# print( doc_id )
# print( tle )
# tle = get_doc(request, doc_id )['doc']['title']
title_ = ct(request, title )
title = no_script( title ) if title else 'Anonymous'
parent = data.get('parent', "_")
descr = data.get( 'descr')
descr = no_script( descr )
descr = descr.replace('\n', '<br/>')
# return if both permissions are missing; if only one is missing, that is exactly what we check next
pre = 'true' if not user_has_permission(request, 'des:obj', 'add_com') else 'false'
date = str( time.strftime("%Y-%m-%d %H:%M:%S") )
user_ = get_current_user_name(request, title ) or title
our = "true" if user_is_logged_in(request) else "false"
body = re.sub(r'(http?://([a-z0-9-]+([.][a-z0-9-]+)+)+(/([0-9a-z._%?#]+)+)*/?)', r'<a href="\1">\1</a>', descr)
# attach the child comment to its parent
request.db.doc.update({ "_id": parent }, { "$addToSet": { "child": doc_id } } )
# store the comment in the comments collection
doc_id_comm, updated = create_empty_row_(request, 'des:comments', parent, '', { "user":'user:'+title })
data = {"id":doc_id_comm, "title":title_, "date":date, "body":body, "parent":parent, "owner":id, 'ip':ip, 'name':user_, "our":our, 'pre':pre }
update_row_(request, 'des:comments', doc_id_comm, data, parent)
if 'notify_user' in dir(settings) and settings.notify_user:
# if 'notify_user' in settings and settings.notify_user:
# link = make_link('show_object', {'doc_id':doc_id }, True)+'#comm_'+ str( id )
link = settings.domain+'/news/'+doc_id+'#comm_'+ str( id )
subject = 'User {} add comment'.format( title )
sub('user:'+title, link, subject)
print('id1', id)
id = get_doc(request, id)['_id']
print('id2', id)
invalidate_cache('single_page', id=id)
# rev = get_doc(request, doc_id)['doc']['rev']
# reset_cache(type="doc", doc_id=rev)
# increment the comment counter kept on the separate (thread) document
request.db.doc.update({ "_id": doc_id }, { "$inc": { "count_branch":1 } } )
# return json.dumps({"result":"ok", "content":data.update({"title":title}), "hash":""})
return response_json(request, {"result":"ok", "content":data, "hash":""}) | 5,356,125 |
def reachable_from_node(node, language=None, include_aliases=True):
"""Returns a tuple of strings containing html <ul> lists of the Nodes and
pages that are children of "node" and any MetaPages associated with these
items.
:param node: node to find reachables for
:param language: if None, returns all items; if specified, restricts the list
to just those with the given language, defaults to None
:param include_aliases: False to skip calculation of aliases, returning
None for the second item in the tuple
:returns: (node_list, alias_list)
"""
alias_list = None
if include_aliases:
# find all of the MetaPages that would be unreachable
nodes = list(node.get_descendants())
nodes.append(node)
metapages = MetaPage.objects.filter(node__in=nodes)
# find anything that aliases one of the targeted metapages
alias_list = reachable_aliases(metapages, language)
node_list = \
"""<ul>
%s
</ul>""" % _pages_subtree_as_list(node, node.site.default_language)
return (node_list, alias_list) | 5,356,126 |
def init_data():
"""
setup all kinds of constants here, just to make it cleaner :)
"""
if args.dataset=='imagenet32':
mean = (0.4811, 0.4575, 0.4078)
std = (0.2605 , 0.2533, 0.2683)
num_classes = 1000
else:
raise NotImplementedError
if args.whiten_image==0:
mean = (0.5, 0.5, 0.5)
std = (0.5, 0.5, 0.5)
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(), # with p = 0.5
transforms.RandomCrop(32, padding=4, padding_mode='reflect'), # with p = 1
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
trainset = ImageNet32(root=args.data_root, train=True,transform=transform_train)
testset = ImageNet32(root=args.data_root, train=False,transform=transform_test)
return trainset, testset, transform_train, transform_test, num_classes | 5,356,127 |
def make_status_craft():
""" Creates a few manufacturing-order statuses """
if Statusfabricacao.objects.count() == 0:
status1 = Statusfabricacao(order=0, status='Pedido Criado')
status2 = Statusfabricacao(order=1, status='Maturaรงรฃo')
status3 = Statusfabricacao(order=2, status='Finalizaรงรฃo')
status4 = Statusfabricacao(order=3, status='Produรงรฃo Encerrada')
status1.save()
status2.save()
status3.save()
status4.save()
return True
return False | 5,356,128 |
def _call_rest_api(url, input_data, request_type):
"""Calls other REST APIs."""
try:
if request_type == 'post':
req = requests.post(url, params=input_data, json=input_data, timeout=30)
else:
req = requests.get(url, params=input_data, timeout=30)
response = req.text
val = json.loads(response)
except Exception as e:
logger.error("Exception in _call_rest_api : " + str(e))
raise ValueError("Filter is down!!!!")
return val | 5,356,129 |
def compute_tso_threshold(arr, min_td=0.1, max_td=0.5, perc=10, factor=15.0):
"""
Computes the daily threshold value separating rest periods from active periods
for the TSO detection algorithm.
Parameters
----------
arr : array
Array of the absolute difference of the z-angle.
min_td : float
Minimum acceptable threshold value.
max_td : float
Maximum acceptable threshold value.
perc : integer, optional
Percentile to use for the threshold. Default is 10.
factor : float, optional
Factor to multiply the percentile value by. Default is 15.0.
Returns
-------
td : float
"""
td = min((max((percentile(arr, perc) * factor, min_td)), max_td))
return td | 5,356,130 |
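A sketch of the threshold computation above on synthetic data (assumes `percentile` is imported from NumPy, as the function expects):
from numpy import percentile
from numpy.random import default_rng
z_angle_diff = default_rng(0).exponential(0.01, size=5000)  # synthetic absolute z-angle differences
td = compute_tso_threshold(z_angle_diff, min_td=0.1, max_td=0.5, perc=10, factor=15.0)
assert 0.1 <= td <= 0.5  # the scaled 10th percentile is clipped into the [min_td, max_td] band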
def test_graceful_squeezing(loss):
"""Test that reshaped raw_prediction gives same results."""
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=20,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=42,
)
if raw_prediction.ndim == 1:
raw_prediction_2d = raw_prediction[:, None]
assert_allclose(
loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.loss(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
) | 5,356,131 |
def test_gen_date_14():
"""max-date must be a Datetime type."""
with pytest.raises(ValueError):
gen_datetime(
min_date=datetime.datetime.now(),
max_date='foo'
) | 5,356,132 |
def run_generator(conversation_name):
"""
Input:
conversation_name: name of conversation to analyze
Output:
username of next speaker, message for that speaker to send next
"""
state = settings.DISCORD_CONVERSATION_STATES.get(conversation_name, {})
(
next_speaker_username,
next_message,
convo,
index,
) = generate_next_speaker_and_message(state, conversation_name)
if not next_speaker_username:
return None, None
bot = TwitterBot.objects.get(username=next_speaker_username)
post = TwitterPost.objects.create(author=bot, content=next_message)
convo.twitterconversationpost_set.create(index=index, author=bot, post=post)
return next_speaker_username, next_message | 5,356,133 |
def count_uniques(row):
"""
Count the unique values in row, minus 1 (because NaN counts as a unique value)
"""
return len(np.unique(row)) - 1 | 5,356,134 |
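A quick example of the helper above (assumes NumPy; note the -1 correction assumes exactly one NaN per row):
import numpy as np
row = np.array([1.0, 2.0, 2.0, np.nan])
print(count_uniques(row))  # 2 -> the unique values are {1.0, 2.0}; the single NaN is discounted by the -1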
def gmres_dot(X, surf_array, field_array, ind0, param, timing, kernel):
"""
It computes the matrix-vector product in the GMRES.
Arguments
----------
X : array, initial vector guess.
surf_array : array, contains the surface classes of each region on the
surface.
field_array: array, contains the Field classes of each region on the
surface.
ind0 : class, it contains the indices related to the treecode
computation.
param : class, parameters related to the surface.
timing : class, it contains timing information for different parts of
the code.
kernel : pycuda source module.
Returns
--------
MV : array, resulting matrix-vector multiplication.
"""
Nfield = len(field_array)
Nsurf = len(surf_array)
# Check if there is a complex dielectric
if any([numpy.iscomplexobj(f.E) for f in field_array]):
complex_diel = True
else:
complex_diel = False
# Place weights on corresponding surfaces and allocate memory
Naux = 0
for i in range(Nsurf):
N = len(surf_array[i].triangle)
if surf_array[i].surf_type == 'dirichlet_surface':
if complex_diel:
surf_array[i].XinK = numpy.zeros(N, dtype=numpy.complex)
else:
surf_array[i].XinK = numpy.zeros(N)
surf_array[i].XinV = X[Naux:Naux + N]
Naux += N
elif surf_array[i].surf_type == 'neumann_surface' or surf_array[
i].surf_type == 'asc_surface':
surf_array[i].XinK = X[Naux:Naux + N]
if complex_diel:
surf_array[i].XinV = numpy.zeros(N, dtype=numpy.complex)
else:
surf_array[i].XinV = numpy.zeros(N)
Naux += N
else:
surf_array[i].XinK = X[Naux:Naux + N]
surf_array[i].XinV = X[Naux + N:Naux + 2 * N]
Naux += 2 * N
if complex_diel:
surf_array[i].Xout_int = numpy.zeros(N, dtype=numpy.complex)
surf_array[i].Xout_ext = numpy.zeros(N, dtype=numpy.complex)
else:
surf_array[i].Xout_int = numpy.zeros(N)
surf_array[i].Xout_ext = numpy.zeros(N)
# Loop over fields
for F in range(Nfield):
parent_type = 'no_parent'
if len(field_array[F].parent) > 0:
parent_type = surf_array[field_array[F].parent[0]].surf_type
if parent_type == 'asc_surface':
# ASC only for self-interaction so far
LorY = field_array[F].LorY
p = field_array[F].parent[0]
v = selfASC(surf_array[p], p, p, LorY, param, ind0, timing, kernel)
surf_array[p].Xout_int += v
if parent_type != 'dirichlet_surface' and parent_type != 'neumann_surface' and parent_type != 'asc_surface':
LorY = field_array[F].LorY
param.kappa = field_array[F].kappa
if len(field_array[F].parent) > 0:
p = field_array[F].parent[0]
v = selfInterior(surf_array[p], p, LorY, param, ind0, timing,
kernel)
surf_array[p].Xout_int += v
# if child surface -> self exterior operator + sibling interaction
# sibling interaction: non-self exterior saved on exterior vector
if len(field_array[F].child) > 0:
C = field_array[F].child
for c1 in C:
v, t1, t2 = selfExterior(surf_array[c1], c1, LorY, param,
ind0, timing, kernel)
surf_array[c1].Xout_ext += v
for c2 in C:
if c1 != c2:
v = nonselfExterior(surf_array, c2, c1, LorY,
param, ind0, timing, kernel)
surf_array[c1].Xout_ext += v
# if child and parent surface -> parent-child and child-parent interaction
# parent->child: non-self interior saved on exterior vector
# child->parent: non-self exterior saved on interior vector
if len(field_array[F].child) > 0 and len(field_array[
F].parent) > 0:
p = field_array[F].parent[0]
C = field_array[F].child
for c in C:
v = nonselfExterior(surf_array, c, p, LorY, param, ind0,
timing, kernel)
surf_array[p].Xout_int += v
v = nonselfInterior(surf_array, p, c, LorY, param, ind0,
timing, kernel)
surf_array[c].Xout_ext += v
# Gather results into the result vector
if complex_diel:
MV = numpy.zeros(len(X), dtype=numpy.complex)
else:
MV = numpy.zeros(len(X))
Naux = 0
for i in range(Nsurf):
N = len(surf_array[i].triangle)
if surf_array[i].surf_type == 'dirichlet_surface':
MV[Naux:Naux + N] = surf_array[i].Xout_ext * surf_array[i].Precond[
0, :]
Naux += N
elif surf_array[i].surf_type == 'neumann_surface':
MV[Naux:Naux + N] = surf_array[i].Xout_ext * surf_array[i].Precond[
0, :]
Naux += N
elif surf_array[i].surf_type == 'asc_surface':
MV[Naux:Naux + N] = surf_array[i].Xout_int * surf_array[i].Precond[
0, :]
Naux += N
else:
MV[Naux:Naux + N] = surf_array[i].Xout_int * surf_array[i].Precond[
0, :] + surf_array[i].Xout_ext * surf_array[i].Precond[1, :]
MV[Naux + N:Naux + 2 * N] = surf_array[i].Xout_int * surf_array[
i].Precond[2, :] + surf_array[i].Xout_ext * surf_array[
i].Precond[3, :]
Naux += 2 * N
return MV | 5,356,135 |
def test_compile_3():
""" test_compile_3 """
# test for Graph mode
# has bias
context.set_context(mode=context.GRAPH_MODE)
net = Net(128, 10)
input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))
_executor.compile(net, input_data)
# training
net_train = Net(128, 10)
net_train.set_train()
_executor.compile(net_train, input_data) | 5,356,136 |
def update_pris(traj, td_loss, indices, alpha=0.6, epsilon=1e-6, update_epi_pris=False, seq_length=None, eta=0.9):
"""
Update priorities specified in indices.
Parameters
----------
traj : Traj
td_loss : torch.Tensor
indices : torch.Tensor or List of int
alpha : float
epsilon : float
update_epi_pris : bool
If True, all priorities of a episode including indices[0] are updated.
seq_length : int
Length of batch.
eta : float
Returns
-------
traj : Traj
"""
pris = (torch.abs(td_loss) + epsilon) ** alpha
traj.data_map['pris'][indices] = pris.detach().to(traj.traj_device())
if update_epi_pris:
epi_start = -1
epi_end = -1
seq_start = indices[0]
for i in range(1, len(traj._epis_index)):
if seq_start < traj._epis_index[i]:
epi_start = traj._epis_index[i-1]
epi_end = traj._epis_index[i]
break
pris = traj.data_map['pris'][epi_start: epi_end]
n_seq = len(pris) - seq_length + 1
abs_pris = np.abs(pris.cpu().numpy())
seq_pris = np.array([eta * np.max(abs_pris[i:i+seq_length]) + (1 - eta) *
np.mean(abs_pris[i:i+seq_length]) for i in range(n_seq)], dtype='float32')
traj.data_map['seq_pris'][epi_start:epi_start +
n_seq] = torch.tensor(seq_pris, dtype=torch.float, device=get_device())
return traj | 5,356,137 |
def get_library_version(performer_prefix: str, schemas: Sequence[Schema]) -> str:
"""Generates the library's version string.
The version string is of the form "{performer_prefix}_{latest_creation_date}_{library_hash}".
Args:
performer_prefix: Performer prefix for context.
schemas: YAML schemas.
Returns:
Version string.
"""
# New class is needed to properly convert entire library to JSON
class YamlLibrary(BaseModel):
__root__: Sequence[Schema]
yaml_library = YamlLibrary(__root__=schemas)
json_schemas = yaml_library.json(exclude_none=True, ensure_ascii=False)
input_hash = hashlib.md5(json_schemas.encode()).hexdigest()[:7]
latest_creation_date = max(schema.creation_date_formatted for schema in schemas)
library_version = f"{performer_prefix}_{latest_creation_date}_{input_hash}"
return library_version | 5,356,138 |
def revert(revision):
"""
Reverts application to selected revision.
Usage: fab prod revert:ae7b9acb96c3fea00ab855952071570279b5d978
"""
with virtualenv():
run('git checkout {}'.format(revision))
run('git submodule update') | 5,356,139 |
def test_for_user_bootcamp_run(lines_fulfilled):
"""
Test for the for_user_bootcamp_run classmethod
"""
line_fulfilled_1, _, user = lines_fulfilled
assert list(Line.for_user_bootcamp_run(user, line_fulfilled_1.bootcamp_run)) == [
line_fulfilled_1
] | 5,356,140 |
def test_raise_fe():
"""Test the raise of KeyError in fe."""
x = np.linspace(0, 1, num=5)
natoms_a = np.full(5, 8)
df = pd.DataFrame({"x": x, "natoms_a": natoms_a})
with pytest.raises(KeyError):
exma.electrochemistry.formation_energy(df, -1.0, -0.5) | 5,356,141 |
def sizeFromString(sizeStr, relativeSize):
"""
Converts from a size string to a float size.
sizeStr: The string representation of the size.
relativeSize: The size to use in case of percentages.
"""
if not sizeStr:
raise Exception("Size not specified")
dpi = 96.0
cm = 2.54
if len(sizeStr) > 2 and sizeStr[-2:] == 'cm':
return float(sizeStr[:-2])*dpi/cm
elif len(sizeStr) > 2 and sizeStr[-2:] == 'mm':
return float(sizeStr[:-2])*dpi/(cm*10.0)
elif len(sizeStr) > 1 and sizeStr[-1:] == 'Q':
return float(sizeStr[:-1])*dpi/(cm*40.0)
elif len(sizeStr) > 2 and sizeStr[-2:] == 'in':
return float(sizeStr[:-2])*dpi
elif len(sizeStr) > 2 and sizeStr[-2:] == 'pc':
return float(sizeStr[:-2])*dpi/6.0
elif len(sizeStr) > 2 and sizeStr[-2:] == 'pt':
return float(sizeStr[:-2])*dpi/72.0
elif len(sizeStr) > 2 and sizeStr[-2:] == 'em':
return float(sizeStr[:-2])*16.0
elif len(sizeStr) > 2 and sizeStr[-2:] == 'px':
return float(sizeStr[:-2])
elif len(sizeStr) > 1 and sizeStr[-1:] == '%':
return float(sizeStr[:-1])/100.0*relativeSize
return float(sizeStr) | 5,356,142 |
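A few worked conversions for the parser above (no extra assumptions; 96 dpi is hard-coded in the function):
print(sizeFromString("2.54cm", 0))  # 96.0  (2.54 cm at 96 dpi is one inch)
print(sizeFromString("72pt", 0))    # 96.0
print(sizeFromString("50%", 400))   # 200.0 (percentages are relative to relativeSize)
print(sizeFromString("300", 0))     # 300.0 (bare numbers are treated as pixels)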
def plot_r2(
model: mofa_model,
x="Group",
y="Factor",
factors: Union[int, List[int], str, List[str]] = None,
groups_df: pd.DataFrame = None,
group_label: str = None,
views=None,
groups=None,
cmap="Blues",
vmin=None,
vmax=None,
**kwargs,
):
"""
Plot R2 values for the model
Parameters
----------
model : mofa_model
Factor model
x : str
Dimension along X axis: Group (default), View, or Factor
y : str
Dimension along Y axis: Group, View, or Factor (default)
factors : optional
Index of a factor (or indices of factors) to use (all factors by default)
views : optional
Make a plot for certain views (None by default to plot all views)
groups : optional
Make a plot for certain groups (None by default to plot all groups)
group_label : optional
Sample (cell) metadata column to be used as group assignment
groups_df : optional pd.DataFrame
Data frame with samples (cells) as index and first column as group assignment
cmap : optional
The colourmap for the heatmap (default is 'Blues' with darker colour for higher R2)
vmin : optional
Display all R2 values smaller than vmin as vmin (0 by default)
vmax : optional
Display all R2 values larger than vmax as vmax (derived from the data by default)
"""
r2 = model.get_r2(
factors=factors,
groups=groups,
views=views,
group_label=group_label,
groups_df=groups_df,
)
vmax = r2.R2.max() if vmax is None else vmax
vmin = 0 if vmin is None else vmin
split_by = [dim for dim in ["Group", "View", "Factor"] if dim not in [x, y]]
assert (
len(split_by) == 1
), "x and y values should be different and be one of Group, View, or Factor"
split_by = split_by[0]
split_by_items = r2[split_by].unique()
fig, axes = plt.subplots(ncols=len(split_by_items), sharex=True, sharey=True)
cbar_ax = fig.add_axes([0.91, 0.3, 0.03, 0.4])
if len(split_by_items) == 1:
axes = [axes]
for i, item in enumerate(split_by_items):
r2_sub = r2[r2[split_by] == item]
r2_df = r2_sub.sort_values("R2").pivot(index=y, columns=x, values="R2")
if y == "Factor":
# Sort by factor index
r2_df.index = r2_df.index.astype("category")
r2_df.index = r2_df.index.reorder_categories(
sorted(r2_df.index.categories, key=lambda x: int(x.split("Factor")[1]))
)
r2_df = r2_df.sort_values("Factor")
if x == "Factor":
# Re-order columns by factor index
r2_df.columns = r2_df.columns.astype("category")
r2_df.columns = r2_df.columns.reorder_categories(
sorted(
r2_df.columns.categories, key=lambda x: int(x.split("Factor")[1])
)
)
r2_df = r2_df[r2_df.columns.sort_values()]
g = sns.heatmap(
r2_df.sort_index(level=0, ascending=False),
ax=axes[i],
cmap=cmap,
vmin=vmin,
vmax=vmax,
cbar=i == 0,
cbar_ax=None if i else cbar_ax,
**kwargs,
)
axes[i].set_title(item)
axes[i].tick_params(axis="both", which="both", length=0)
if i == 0:
g.set_yticklabels(g.yaxis.get_ticklabels(), rotation=0)
else:
axes[i].set_ylabel("")
plt.close()
return fig | 5,356,143 |
def all(x: Union[ivy.Array, ivy.NativeArray],
axis: Optional[Union[int, Tuple[int], List[int]]] = None,
keepdims: bool = False)\
-> ivy.Array:
"""
Tests whether all input array elements evaluate to ``True`` along a specified axis.
.. note::
Positive infinity, negative infinity, and NaN must evaluate to ``True``.
.. note::
If ``x`` is an empty array or the size of the axis (dimension) along which to evaluate elements is zero, the test result must be ``True``.
Parameters
----------
x:
input array.
axis:
axis or axes along which to perform a logical AND reduction. By default, a logical AND reduction must be performed over the entire array. If a tuple of integers, logical AND reductions must be performed over multiple axes. A valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N`` is the rank (number of dimensions) of ``x``. If an ``axis`` is specified as a negative integer, the function must determine the axis along which to perform a reduction by counting backward from the last dimension (where ``-1`` refers to the last dimension). If provided an invalid ``axis``, the function must raise an exception. Default: ``None``.
keepdims:
If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``.
Returns
-------
out:
if a logical AND reduction was performed over the entire array, the returned array must be a zero-dimensional array containing the test result; otherwise, the returned array must be a non-zero-dimensional array containing the test results. The returned array must have a data type of ``bool``.
"""
return _cur_framework(x).all(x, axis, keepdims) | 5,356,144 |
def compilePy(target):
"""Compiles py files to pyc and removes all py files at the end."""
import compileall
compileall.compile_dir(target, force=True, legacy=True)
os.system(f'find "{target}" -name "*.py" -type f -delete') | 5,356,145 |
def verify_time_format(time_str):
"""
This method verifies a time string in the 'hour:minute' format, where each part can be one or two
characters.
Hour must be greater than or equal to 0 and smaller than 24; minute must be greater than or equal to 0 and smaller than 60.
:param time_str: time string to check
:return: True if the format and value ranges are valid, False otherwise
"""
if not isinstance(time_str, str):
return False
time_format = r'^(\d{1,2}):(\d{1,2})$'
matched = re.match(time_format, time_str)
if matched:
if 0 <= int(matched.group(1)) < 24 and 0 <= int(matched.group(2)) < 60:
return True
else:
print('Hour should be within [0, 24); Minute should be within [0, 60)')
return False
else:
return False | 5,356,146 |
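A usage sketch of the validator above (no assumptions beyond the function itself):
assert verify_time_format("9:05") is True
assert verify_time_format("23:59") is True
assert verify_time_format("24:00") is False  # hour must be < 24 (a hint is printed)
assert verify_time_format(930) is False      # non-string input is rejected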
def TestTags(client, get_fn, add_fn, delete_fn, *args):
""" Tests whether tagging works.
@type client: C{GanetiRapiClientWrapper}
@param client: The client wrapper.
@type get_fn: function
@param get_fn: A Get*Tags function of the client.
@type add_fn: function
@param add_fn: An Add*Tags function of the client.
@type delete_fn: function
@param delete_fn: A Delete*Tags function of the client.
To allow this method to work for all tagging functions of the client, use
named methods.
"""
get_fn(*args)
tags = ["tag1", "tag2", "tag3"]
Finish(client, add_fn, *args, tags=tags, dry_run=True)
Finish(client, add_fn, *args, tags=tags)
get_fn(*args)
Finish(client, delete_fn, *args, tags=tags[:1], dry_run=True)
Finish(client, delete_fn, *args, tags=tags[:1])
get_fn(*args)
Finish(client, delete_fn, *args, tags=tags[1:])
get_fn(*args) | 5,356,147 |
def test_export_overwrite(tmpdir, data):
"""Overwrites existing file"""
inputfile = str(data.join('RGB.byte.tif'))
output = tmpdir.join('export.mbtiles')
output.write("lolwut")
outputfile = str(output)
runner = CliRunner()
result = runner.invoke(main_group, ['mbtiles', '--overwrite', inputfile,
outputfile])
assert result.exit_code == 0
conn = sqlite3.connect(outputfile)
cur = conn.cursor()
cur.execute("select * from metadata where name == 'name'")
assert cur.fetchone()[1] == 'RGB.byte.tif' | 5,356,148 |
def extract_region_df(region_code="11"):
"""
Extracts dataframes that describes regional-level vaccines data for a single region, making some analysis on it.
:rtype: Dataframe
"""
df = RAW_DF
df = df.loc[df['codice_regione_ISTAT'] == region_code]
df = df.sort_values('data_somministrazione')
df = df.reset_index()
# Keep data from January 2021 onwards
df = df[df['data_somministrazione'] >= '2021-01-01']
# Doses per 100.000 inhabitants
df['prima_dose_per_100000_ab'] = df.apply(lambda x: x['prima_dose'] / population_dict[x['codice_regione_ISTAT']] * 100000,
axis=1)
df['seconda_dose_per_100000_ab'] = df.apply(lambda x: x['seconda_dose'] / population_dict[x['codice_regione_ISTAT']]
* 100000, axis=1)
df['totale_su_pop'] = df.apply(lambda x: x['totale'] / population_dict[x['codice_regione_ISTAT']], axis=1)
df['totale_per_100000_ab'] = df.apply(lambda x: x['totale_su_pop'] * 100000, axis=1)
# Historical totals
df['totale_storico'] = df['totale'].cumsum()
df['totale_storico_su_pop'] = df.apply(lambda x: x['totale_storico'] / population_dict[x['codice_regione_ISTAT']], axis=1)
df['prima_dose_totale_storico'] = df['prima_dose'].cumsum()
df['prima_dose_totale_storico_su_pop'] = df.apply(lambda x: x['prima_dose_totale_storico'] /
population_dict[x['codice_regione_ISTAT']], axis=1)
df['seconda_dose_totale_storico'] = df['seconda_dose'].cumsum()
df['seconda_dose_totale_storico_su_pop'] = df.apply(lambda x: x['seconda_dose_totale_storico'] /
population_dict[x['codice_regione_ISTAT']], axis=1)
return df | 5,356,149 |
def get_title(offer_markup):
""" Searches for offer title on offer page
:param offer_markup: Class "offerbody" from offer page markup
:type offer_markup: str
:return: Title of offer
:rtype: str, None
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
return html_parser.h1.text.strip() | 5,356,150 |
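A minimal usage sketch of the scraper helper above (assumes BeautifulSoup is importable; the markup is a made-up offer body):
markup = "<div class='offerbody'><h1>  2-room flat, city centre  </h1><p>details...</p></div>"
print(get_title(markup))  # "2-room flat, city centre"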
def genome(request):
"""Create a test genome and location"""
name = "ce10" # Use fake name for blacklist test
fafile = "tests/data/small_genome.fa.gz"
genomes_dir = os.path.join(os.getcwd(), ".genomepy_plugin_tests")
if os.path.exists(genomes_dir):
shutil.rmtree(genomes_dir)
genome_dir = os.path.join(genomes_dir, name)
genomepy.utils.mkdir_p(genome_dir)
fname = os.path.join(genome_dir, f"{name}.fa.gz")
shutil.copyfile(fafile, fname)
# unzip genome if required
if request.param == "unzipped":
sp.check_call(["gunzip", fname])
# add annotation (for STAR and hisat2), but only once
gtf_file = "tests/data/ce10.annotation.gtf.gz"
aname = os.path.join(genome_dir, f"{name}.annotation.gtf.gz")
shutil.copyfile(gtf_file, aname)
return genomepy.Genome(name, genomes_dir=genomes_dir) | 5,356,151 |
def get_twinboundary_shear_structure(twinboundary_relax_structure,
shear_strain_ratio,
previous_relax_structure=None,
**additional_relax_structures,
):
"""
If latest_structure is None, use s=0 structure as the original
structure to be sheared. shear_strain_ratios must include zero.
additional_relaxes is AttributeDict.
"""
relax_wf = WorkflowFactory('vasp.relax')
tb_relax_wf = WorkflowFactory('twinpy.twinboundary_relax')
ratio = shear_strain_ratio.value
tb_rlx_node = get_create_node(twinboundary_relax_structure.pk,
tb_relax_wf)
addi_rlx_pks = []
for i in range(len(additional_relax_structures)):
label = 'additional_structure_%02d' % (i+1)
structure_pk_ = additional_relax_structures[label].pk
rlx_pk = get_create_node(structure_pk_,
relax_wf).pk
addi_rlx_pks.append(rlx_pk)
aiida_twinboundary_relax = \
AiidaTwinBoudnaryRelaxWorkChain(tb_rlx_node)
aiida_rlx = aiida_twinboundary_relax.get_aiida_relax(
additional_relax_pks=addi_rlx_pks)
tb_analyzer = \
aiida_twinboundary_relax.get_twinboundary_analyzer(
additional_relax_pks=addi_rlx_pks)
if addi_rlx_pks == []:
kpt_info = aiida_rlx.get_kpoints_info()
else:
kpt_info = aiida_rlx.aiida_relaxes[0].get_kpoints_info()
if previous_relax_structure is None:
orig_cell = tb_analyzer.get_shear_cell(
shear_strain_ratio=ratio,
is_standardize=False)
cell = tb_analyzer.get_shear_cell(
shear_strain_ratio=ratio,
is_standardize=True)
else:
prev_rlx_node = get_create_node(previous_relax_structure.pk, relax_wf)
create_tb_shr_node = get_create_node(prev_rlx_node.inputs.structure.pk,
CalcFunctionNode)
prev_orig_structure = \
create_tb_shr_node.outputs.twinboundary_shear_structure_orig
prev_orig_cell = get_cell_from_aiida(prev_orig_structure)
prev_aiida_rlx = AiidaRelaxWorkChain(prev_rlx_node)
prev_rlx_analyzer = prev_aiida_rlx.get_relax_analyzer(
original_cell=prev_orig_cell)
atom_positions = \
prev_rlx_analyzer.final_cell_in_original_frame[1]
orig_cell = tb_analyzer.get_shear_cell(
shear_strain_ratio=ratio,
is_standardize=False,
atom_positions=atom_positions)
cell = tb_analyzer.get_shear_cell(
shear_strain_ratio=ratio,
is_standardize=True,
atom_positions=atom_positions)
orig_structure = get_aiida_structure(cell=orig_cell)
structure = get_aiida_structure(cell=cell)
# kpoints
rlx_mesh = np.array(kpt_info['mesh'])
rlx_offset = np.array(kpt_info['offset'])
rlx_kpoints = (rlx_mesh, rlx_offset)
std_base = StandardizeCell(tb_analyzer.relax_analyzer.original_cell)
orig_kpoints = std_base.convert_kpoints(
kpoints=rlx_kpoints,
kpoints_type='primitive')['original']
std = StandardizeCell(orig_cell)
kpoints = std.convert_kpoints(kpoints=orig_kpoints,
kpoints_type='original')['primitive']
kpt_orig = KpointsData()
kpt_orig.set_kpoints_mesh(orig_kpoints[0], offset=orig_kpoints[1])
kpt = KpointsData()
kpt.set_kpoints_mesh(kpoints[0], offset=kpoints[1])
return_vals = {}
return_vals['twinboundary_shear_structure_orig'] = orig_structure
return_vals['twinboundary_shear_structure'] = structure
return_vals['kpoints_orig'] = kpt_orig
return_vals['kpoints'] = kpt
return return_vals | 5,356,152 |
def search(source_num, bin_path, chrome_args):
"""
Scrape proxies from the web
"""
chrome_args = chrome_args.split(',')
_args = []
for arg in chrome_args:
if len(arg) > 0:
if not arg.startswith('--'):
arg = '--{}'.format(arg)
_args.append(arg)
chrome_args = _args
client = proxytools.Client()
proxies = client.search_proxies(source_num=source_num, bin_path=bin_path, chrome_args=chrome_args)
urls = [str(p) for p in proxies]
print(json.dumps(urls, indent=4)) | 5,356,153 |
def _map_spectrum_weight(map, spectrum=None):
"""Weight a map with a spectrum.
This requires map to have an "energy" axis.
The weights are normalised so that they sum to 1.
The mean and unit of the output image is the same as of the input cube.
At the moment this is used to get a weighted exposure image.
Parameters
----------
map : `~gammapy.maps.Map`
Input map with an "energy" axis.
spectrum : `~gammapy.modeling.models.SpectralModel`
Spectral model to compute the weights.
Default is power-law with spectral index of 2.
Returns
-------
map_weighted : `~gammapy.maps.Map`
Weighted image
"""
if spectrum is None:
spectrum = PowerLawSpectralModel(index=2.0)
# Compute weights vector
energy_edges = map.geom.get_axis_by_name("energy").edges
weights = spectrum.integral(
emin=energy_edges[:-1], emax=energy_edges[1:], intervals=True
)
weights /= weights.sum()
shape = np.ones(len(map.geom.data_shape))
shape[0] = -1
return map * weights.reshape(shape.astype(int)) | 5,356,154 |
def fetch_all_db_as_df(allow_cached=False):
"""Converts list of dicts returned by `fetch_all_db` to DataFrame with ID removed
Actual job is done in `_worker`. When `allow_cached`, attempt to retrieve timed cached from
`_fetch_all_db_as_df_cache`; ignore cache and call `_work` if cache expires or `allow_cached`
is False.
"""
def _work():
ret_dict = fetch_all_db()
if len(ret_dict) == 0:
return None
df_dict = {}
for level, data in ret_dict.items():
df = pd.DataFrame.from_records(data)
df.drop('_id', axis=1, inplace=True)
df.columns = map(str.lower, df.columns)
df_dict[level] = df
return df_dict
if allow_cached:
try:
return _fetch_all_db_as_df_cache['cache']
except KeyError:
pass
ret = _work()
_fetch_all_db_as_df_cache['cache'] = ret
return ret | 5,356,155 |
def tool_proxy_from_persistent_representation(persisted_tool, strict_cwl_validation=True, tool_directory=None):
"""Load a ToolProxy from a previously persisted representation."""
ensure_cwltool_available()
return ToolProxy.from_persistent_representation(
persisted_tool, strict_cwl_validation=strict_cwl_validation, tool_directory=tool_directory
) | 5,356,156 |
def has_space_element(source):
"""
Check the elements of an object: if a None or an empty string is present, return True,
otherwise return False. Supports dict, list and tuple.
:param:
* source: (list, set, dict) the object to check
:return:
* result: (bool) True if a None, empty string or whitespace-only string is present, otherwise False
Example::
print('--- has_space_element demo---')
print(has_space_element([1, 2, 'test_str']))
print(has_space_element([0, 2]))
print(has_space_element([1, 2, None]))
print(has_space_element((1, [1, 2], 3, '')))
print(has_space_element({'a': 1, 'b': 0}))
print(has_space_element({'a': 1, 'b': []}))
print('---')
Result::
--- has_space_element demo---
False
False
True
True
False
True
---
"""
if isinstance(source, dict):
check_list = list(source.values())
elif isinstance(source, list) or isinstance(source, tuple):
check_list = list(source)
else:
raise TypeError('source except list, tuple or dict, but got {}'.format(type(source)))
for i in check_list:
if i == 0:  # 0 is a legitimate value, not an "empty" element
continue
if not (i and str(i).strip()):
return True
return False | 5,356,157 |
def return_latest_psm_is(df, id_col, file_col, instr_col, psm_col):
""" Extracts info on PSM number, search ID and Instrument from the last row in DB
"""
last_row = df.iloc[-1]
search_id = last_row[id_col]
instr = last_row[instr_col]
psm = last_row[psm_col]
psm_string = str(psm) + ' PSMs in file ' + str(last_row[file_col])
print('String to put on the graph', psm_string)
return (search_id, instr, psm, psm_string) | 5,356,158 |
async def check_find_settings(settings_collection: SettingsCollection, test_data: dict):
"""
Check that the find settings in the database collection returns the expected result.
:param settings_collection: MongoDB collection.
:type settings_collection: SettingsCollection
:param test_data: Database test data.
:type test_data: dict
"""
setup_data, data, expected_data = parse_test_sample(test_data)
await setup_database(settings_collection, setup_data)
search_results = await settings_collection.find_settings_post(data['guild_id'])
search_results.pop('_id')
assert_message = f"Invalid search result, should be {expected_data}."
assert search_results == expected_data, assert_message | 5,356,159 |
def add_parser_arguments_misc(parser):
"""
Adds the options that the command line parser will search for, some miscellaneous parameters, like use of gpu,
timing, etc.
:param parser: the argument parser
:return: the same parser, but with the added options.
"""
parser.add_argument('--use_gpu', action='store_true',
help='use GPU (CUDA). For loading data on Windows OS, if you get an Access Denied or Operation '
'Not Supported for cuda, you must set --loader_num_workers to 0 '
'(you can\'t share CUDA tensors among Windows processes).')
parser.add_argument('--gpu_num', default="0", type=str)
parser.add_argument('--map_gpu_beginning', action='store_true',
help='Will map all tensors (including FULL dataset) to GPU at the start of the instance, if '
'--use_gpu flag is supplied and CUDA is available. This option is NOT recommended if you '
'have low GPU memory or if you dataset is very large, since you may quickly run out of '
'memory.')
parser.add_argument('--timing', action='store_true',
help='if specified, will display times for several parts of training')
parser.add_argument('--load_args_from_json', type=str, default=None,
help='Path to json file containing args to pass. Should be an object containing the keys of '
'the attributes you want to change (keys that you don\'t supply will be left unchanged) '
'and their values according to their type (int, str, bool, list, etc.)')
return parser | 5,356,160 |
def colo_model_tensor_clone(t: Union[StatefulTensor, torch.Tensor], target_device: torch.device) -> torch.Tensor:
"""
Clone a model data tensor
Args:
t (Union[StatefulTensor, torch.Tensor]): a model data tensor
target_device (torch.device): the target device
Returns:
torch.Tensor: a cloned torch tensor
"""
# TODO() rename this function
colo_model_data_tensor_move_inline(t, target_device)
t_payload = t.payload if isinstance(t, StatefulTensor) else t
return t_payload | 5,356,161 |
def plugin_init(config):
"""Registers HTTP Listener handler to accept sensor readings
Args:
config: JSON configuration document for the South device configuration category
Returns:
handle: JSON object to be used in future calls to the plugin
Raises:
"""
handle = config
return handle | 5,356,162 |
def search(query,page):
"""Scrapes the search query page and returns the results in json format.
Parameters
------------
query: The query you want to search for.
page: The page number for which you want the results.
Every page returns 11 results.
"""
driver.get(f'https://phys.libretexts.org/Special:Search?qid=&fpid=230&fpth=&query={query}&type=wiki')
clicks = page
while clicks>1:
showMoreButton = driver.find_element_by_xpath('//*[@id="mt-search-spblls-component"]/div[2]/div/button')
showMoreButton.click()
clicks -= 1
time.sleep(2)
output = []
start = (page-1)* 11
stop = start + 12
for i in range(start+1,stop):
content = driver.find_element_by_xpath(f'//*[@id="search-results"]/li[{i}]/div/div[2]/div[2]/span[1]').text
path = f'//*[@id="search-results"]/li[{i}]/div/div[1]/a'
for a in driver.find_elements_by_xpath(path):
title = a.get_attribute('title')
link = a.get_attribute('href')
result = {
"title":title,
"link":link,
"content":content
}
output.append(result)
output_json = {
"results":output
}
driver.close()
return json.dumps(output_json) | 5,356,163 |
def get_jobs(job_filename):
"""Reads jobs from a known job file location
"""
jobs = list()
if job_filename and os.path.isfile(job_filename):
with open(job_filename, 'r') as input_fd:
data = input_fd.read()
job_dict = json.loads(data)
del data
for job in job_dict['jobs']:
jobs.append(job)
os.unlink(job_filename)
return jobs | 5,356,164 |
def split_train_eval_data(origin_file: str, train_file: str,
eval_file: str, fraction: float = .2) -> None:
"""
ไปๅๅงๆๅจๆ ๆณจๆฐๆฎไธญๅ็ฆป่ฎญ็ปๅๆต่ฏ้
:param origin_file: ๅๅงๆฐๆฎ่ทฏๅพ
:param train_file: ไฟๅญ็่ฎญ็ปๆฐๆฎๆไปถ่ทฏๅพ
:param eval_file: ไฟๅญ็ๆต่ฏๆไปถ่ทฏๅพ
:param fraction: ๅ็ฆป็ๆต่ฏๆฐๆฎๆฏไพ๏ผ้ป่ฎคๆปไฝ็20%
:return:
"""
origin_data = pd.read_excel(origin_file)
eval_data = origin_data.sample(frac=fraction)
train_data = origin_data.drop(index=eval_data.index)
eval_data.reset_index(drop=True, inplace=True)
train_data.reset_index(drop=True, inplace=True)
    eval_data.to_excel(eval_file, index=False)
    train_data.to_excel(train_file, index=False) | 5,356,165 |
def quote_with_backticks_definer(definer):
"""Quote the given definer clause with backticks.
This functions quotes the given definer clause with backticks, converting
backticks (`) in the string with the correct escape sequence (``).
definer[in] definer clause to quote.
Returns string with the definer quoted with backticks.
"""
if not definer:
return definer
parts = definer.split('@')
if len(parts) != 2:
return definer
return '@'.join([quote_with_backticks(parts[0]),
quote_with_backticks(parts[1])]) | 5,356,166 |
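A brief, hypothetical illustration of the expected behaviour (assuming quote_with_backticks wraps a single identifier in backticks and doubles any embedded backtick; the outputs below are assumptions, not verified against the library):
# quote_with_backticks_definer("root@localhost")  ->  "`root`@`localhost`"   (assumed output)
# quote_with_backticks_definer("")                ->  ""            (falsy definer returned unchanged)
# quote_with_backticks_definer("no_at_sign")      ->  "no_at_sign"  (no '@', returned as-is)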
def cvimg_to_b64(img):
"""
ๅพ็่ฝฌๆขๅฝๆฐ๏ผๅฐไบ่ฟๅถๅพ็่ฝฌๆขไธบbase64ๅ ๅฏๆ ผๅผ
"""
try:
image = cv2.imencode('.jpg', img)[1] #ๅฐๅพ็ๆ ผๅผ่ฝฌๆข(็ผ็ )ๆๆตๆฐๆฎ๏ผ่ตๅผๅฐๅ
ๅญ็ผๅญไธญ
base64_data = str(base64.b64encode(image))[2:-1] #ๅฐๅพ็ๅ ๅฏๆbase64ๆ ผๅผ็ๆฐๆฎ
return base64_data #่ฟๅๅ ๅฏๅ็็ปๆ
except Exception as e:
return "error" | 5,356,167 |
def draw_label(label, img, n_class, label_titles, bg_label=0):
"""Convert label to rgb with label titles.
    @param label_titles: label title for each label.
    @type label_titles: dict
"""
from PIL import Image
from scipy.misc import fromimage
from skimage.color import label2rgb
from skimage.transform import resize
colors = labelcolormap(n_class)
label_viz = label2rgb(label, img, colors=colors[1:], bg_label=bg_label)
# label 0 color: (0, 0, 0, 0) -> (0, 0, 0, 255)
label_viz[label == 0] = 0
# plot label titles on image using matplotlib
plt.subplots_adjust(left=0, right=1, top=1, bottom=0,
wspace=0, hspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.axis('off')
# plot image
plt.imshow(label_viz)
# plot legend
plt_handlers = []
plt_titles = []
for label_value in np.unique(label):
if label_value not in label_titles:
continue
fc = colors[label_value]
p = plt.Rectangle((0, 0), 1, 1, fc=fc)
plt_handlers.append(p)
plt_titles.append(label_titles[label_value])
plt.legend(plt_handlers, plt_titles, loc='lower right', framealpha=0.5)
# convert plotted figure to np.ndarray
f = StringIO.StringIO()
plt.savefig(f, bbox_inches='tight', pad_inches=0)
result_img_pil = Image.open(f)
result_img = fromimage(result_img_pil, mode='RGB')
result_img = resize(result_img, img.shape, preserve_range=True)
result_img = result_img.astype(img.dtype)
return result_img | 5,356,168 |
def inspect(template_dir, display_type=None):
"""Generates a some string representation of all undefined variables
in templates.
Args:
template_dir (str): all files within are treated as templates
display_type (str): tabulate.tabulate tablefmt or 'terse'.
Examples:
Yields an overview of config parameter placeholders for FireWorks
config template directory `imteksimfw/fireworks/templates/fwconfig`:
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโคโโโโโโโโโโโโโโโคโโโโโโโโโโโโโโโโโโโคโโโโโโโโโโโโโโคโโโโโโโโโโโโโคโโโโโโโโโโโโโโโโโโโโโคโโโโโโโโโโโโคโโโโโโโโโโโโโโโโโคโโโโโโโโโโโโโโโคโโโโโโโโโโโโโโโโโโโโคโโโโโโโโโโคโโโโโโโโโโโโโโโโ
โ โ FIREWORKS_DB โ FW_CONFIG_PREFIX โ WEBGUI_PORT โ LOGDIR_LOC โ MONGODB_PORT_LOCAL โ FW_PREFIX โ FIREWORKS_USER โ MONGODB_HOST โ FW_AUTH_FILE_NAME โ MACHINE โ FIREWORKS_PWD โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ FW_config.yaml โ โ x โ x โ โ โ x โ โ โ x โ x โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ bwcloud_noqueue_fworker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ fireworks_mongodb_auth.yaml โ x โ โ โ x โ x โ โ x โ x โ โ โ x โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ forhlr2_noqueue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ forhlr2_queue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ forhlr2_slurm_qadapter.yaml โ โ x โ โ โ โ โ โ โ x โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ juwels_noqueue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ juwels_queue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ juwels_slurm_qadapter.yaml โ โ x โ โ โ โ โ โ โ x โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ nemo_moab_qadapter.yaml โ โ x โ โ โ โ โ โ โ x โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ nemo_noqueue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโผโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโผโโโโโโโโโโผโโโโโโโโโโโโโโโโค
โ nemo_queue_worker.yaml โ โ โ โ โ โ โ โ โ โ โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโงโโโโโโโโโโโโโโโงโโโโโโโโโโโโโโโโโโโงโโโโโโโโโโโโโโงโโโโโโโโโโโโโงโโโโโโโโโโโโโโโโโโโโโงโโโโโโโโโโโโงโโโโโโโโโโโโโโโโโงโโโโโโโโโโโโโโโงโโโโโโโโโโโโโโโโโโโโงโโโโโโโโโโงโโโโโโโโโโโโโโโโ
"""
undefined = get_undefined(template_dir)
return variable_overview(undefined, display_type) | 5,356,169 |
def calc_fitness_all(chromosomes, video_list, video_data):
"""Calculates fitness for all chromosomes
Parameters
----------
    chromosomes : np.ndarray
List of chromosomes
video_list : np.ndarray
List of all video titles (in this case number identifiers)
video_data : pd dataframe
Dataframe of Emotion by Time w/ video as a column
Returns
-------
list
Determinant of the covariance matrix of all emotions by time
"""
fitness = []
for chromosome in chromosomes:
fitness.append(calc_fitness_individual(chromosome, video_list,
video_data))
return fitness | 5,356,170 |
def grouping_is_valid(
proposed_grouping: List[Set[str]],
past_groups: List[Set[str]],
max_intersection_size: int,
) -> bool:
"""Returns true if no group in the proposed grouping intersects with any
past group with intersection size strictly greater than
`max_intersection_size`.
"""
for group in proposed_grouping:
for past_group in past_groups:
if len(group & past_group) > max_intersection_size:
return False
return True | 5,356,171 |
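A small self-contained usage sketch (the example sets are illustrative only, not from the source):
past = [{"a", "b"}, {"c", "d"}]
proposed = [{"a", "b", "c"}]
# {"a", "b", "c"} shares two members with the past group {"a", "b"}:
assert grouping_is_valid(proposed, past, max_intersection_size=2) is True   # 2 is not > 2
assert grouping_is_valid(proposed, past, max_intersection_size=1) is False  # 2 > 1, so rejected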
def spyder_light(event):
""" Launch spyder in "light" mode """
oldarg = sys.argv
sys.argv = ['spyder', '--light']
spyder_launch(event)
sys.argv = oldarg | 5,356,172 |
def next_wire_in_dimension(wire1, tile1, wire2, tile2, tiles, x_wires, y_wires,
wire_map, wires_in_node):
""" next_wire_in_dimension returns true if tile1 and tile2 are in the same
row and column, and must be adjcent.
"""
tile1_info = tiles[tile1]
tile2_info = tiles[tile2]
tile1_x = tile1_info['grid_x']
tile2_x = tile2_info['grid_x']
tile1_y = tile1_info['grid_y']
tile2_y = tile2_info['grid_y']
    # All wires are in the same row or column, or each wire lies in its own
    # row or column.
if len(y_wires) == 1 or len(x_wires) == len(wires_in_node) or abs(
tile1_y - tile2_y) == 0:
ordered_wires = sorted(x_wires.keys())
idx1 = ordered_wires.index(tile1_x)
idx2 = ordered_wires.index(tile2_x)
if len(x_wires[tile1_x]) == 1 and len(x_wires[tile2_x]) == 1:
return abs(idx1 - idx2) == 1
if len(x_wires) == 1 or len(y_wires) == len(wires_in_node) or abs(
tile1_x - tile2_x) == 0:
ordered_wires = sorted(y_wires.keys())
idx1 = ordered_wires.index(tile1_y)
idx2 = ordered_wires.index(tile2_y)
if len(y_wires[tile1_y]) == 1 and len(y_wires[tile2_y]) == 1:
return abs(idx1 - idx2) == 1
return None | 5,356,173 |
def get(*, db_session, report_id: int) -> Optional[Report]:
"""
Get a report by id.
"""
return db_session.query(Report).filter(Report.id == report_id).one_or_none() | 5,356,174 |
def host_from_path(path):
"""returns the host of the path"""
url = urllib.parse.urlparse(path)
return url.netloc | 5,356,175 |
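A quick usage sketch (the URLs are illustrative only):
assert host_from_path("https://example.com/data/file.csv") == "example.com"
assert host_from_path("/local/path/file.csv") == ""  # no scheme, so urlparse yields an empty netloc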
def sampleM(a0, bk, njk, m_cap=20):
"""produces sample from distribution over M using normalized log probabilities parameterizing a
categorical dist."""
raise DeprecationWarning()
wts = np.empty((m_cap,))
sum = 0
for m in range(m_cap):
wts[m] = gammaln(a0*bk) - gammaln(a0*bk+njk) + log(stirling.get(njk, m)+1e-9) + m*(a0+bk)
        sum += wts[m]
wts = np.array(wts) / sum
print(wts, np.sum(wts))
return rand.multinomial(1, wts) | 5,356,176 |
def or_default(none_or_value, default):
"""
inputs:
none_or_value: variable to test
default: value to return if none_or_value is None
"""
return none_or_value if none_or_value is not None else default | 5,356,177 |
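For illustration, the helper falls back only when the value is None, not merely falsy:
assert or_default(None, 10) == 10
assert or_default(0, 10) == 0            # 0 is falsy but not None, so it is kept
assert or_default("", "fallback") == ""  # same for the empty string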
def find_optimal_components_subset(contours, edges):
"""Find a crop which strikes a good balance of coverage/compactness.
Returns an (x1, y1, x2, y2) tuple.
"""
c_info = props_for_contours(contours, edges)
c_info.sort(key=lambda x: -x['sum'])
total = np.sum(edges) / 255
area = edges.shape[0] * edges.shape[1]
c = c_info[0]
del c_info[0]
this_crop = c['x1'], c['y1'], c['x2'], c['y2']
crop = this_crop
covered_sum = c['sum']
while covered_sum < total:
changed = False
recall = 1.0 * covered_sum / total
prec = 1 - 1.0 * crop_area(crop) / area
f1 = 2 * (prec * recall / (prec + recall))
#print '----'
for i, c in enumerate(c_info):
this_crop = c['x1'], c['y1'], c['x2'], c['y2']
new_crop = union_crops(crop, this_crop)
new_sum = covered_sum + c['sum']
new_recall = 1.0 * new_sum / total
new_prec = 1 - 1.0 * crop_area(new_crop) / area
new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)
# Add this crop if it improves f1 score,
# _or_ it adds 25% of the remaining pixels for <15% crop expansion.
# ^^^ very ad-hoc! make this smoother
remaining_frac = c['sum'] / (total - covered_sum)
new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
if new_f1 > f1 or (
remaining_frac > 0.25 and new_area_frac < 0.15):
print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
i, covered_sum, new_sum, total, remaining_frac,
crop_area(crop), crop_area(new_crop), area, new_area_frac,
f1, new_f1))
crop = new_crop
covered_sum = new_sum
del c_info[i]
changed = True
break
if not changed:
break
return crop | 5,356,178 |
def fetch_align_sex(rerun, run, camcol, field,
bands=None, reference_band='r', remove=True):
"""
Run fetch, align, and sex in a single field.
"""
if bands is None:
bands = [b for b in "ugriz"]
registered_images = fetch_align(rerun, run, camcol, field, remove=remove)
reference_image = [i for i in registered_images if 'registered' not in i][0]
catalog = run_sex(reference_image, remove=remove)
result = get_cutout(catalog, registered_images, bands)
if remove:
for image in registered_images:
if os.path.exists(image):
os.remove(image)
filename = os.path.join("result", reference_image.replace(".fits", ".npy"))
if not os.path.exists("result"):
os.makedirs("result")
np.save(filename, result) | 5,356,179 |
def scrape(html):
"""์ ๊ทํํ์์ผ๋ก ๋์ ์ ๋ณด ์ถ์ถ"""
books = []
for partial_html in re.findall(r'<td class="left">Ma.*?</td>', html, re.DOTALL):
#๋์์ URL ์ถ์ถ
url = re.search(r'<a href="(.*?)">', partial_html).group(1)
url = 'http://www.hanbit.co.kr' + url
#ํ๊ทธ๋ฅผ ์ ๊ฑฐํด ๋์์ ์ ๋ชฉ ์ถ์ถ
title = re.sub(r'<.*?>', '', partial_html)
title = unescape(title)
books.append({'url': url, 'title': title})
return books | 5,356,180 |
def get_time_zone_offset(area_code):
""" Returns an integer offset value if it finds a matching area code,
otherwise returns None."""
if not isinstance(area_code, str):
area_code = str(area_code)
if area_code in area_code_mapping:
return area_code_mapping[area_code][1] | 5,356,181 |
def true_false_counts(series: pd.Series):
"""
input: a boolean series
returns: two-tuple (num_true, num_false)
"""
return series.value_counts().sort_index(ascending=False).tolist() | 5,356,182 |
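A minimal sketch of the expected output (pandas imported as pd; note that a series containing only one of the two values yields a single-element list):
s = pd.Series([True, False, True, True])
assert true_false_counts(s) == [3, 1]  # (num_true, num_false)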
def phyutility(DIR,alignment,min_col_occup,seqtype,min_chr=10):
"""
remove columns with occupancy lower than MIN_COLUMN_OCCUPANCY
    remove seqs shorter than MIN_CHR after filtering columns
"""
if DIR[-1] != "/": DIR += "/"
cleaned = alignment+"-cln"
if os.path.exists(DIR+cleaned): return cleaned
assert alignment.endswith(".aln"),\
"phyutility infile "+alignment+" not ends with .aln"
assert os.stat(DIR+alignment).st_size > 0, DIR+alignment+"empty"
assert seqtype == "aa" or seqtype == "dna","Input data type: dna or aa"
if seqtype == "aa":
cmd = ["phyutility","-aa","-clean",str(min_col_occup),"-in",\
DIR+alignment,"-out",DIR+alignment+"-pht"]
else:
cmd = ["phyutility","-clean",str(min_col_occup),"-in",\
DIR+alignment,"-out",DIR+alignment+"-pht"]
print " ".join(cmd)
os.system(" ".join(cmd))
assert os.path.exists(DIR+alignment+"-pht"),"Error phyutility"
#remove empty and very short seqs
outfile = open(DIR+cleaned,"w")
for s in read_fasta_file(DIR+alignment+"-pht"):
if len(s.seq.replace("-","")) >= min_chr:
outfile.write(s.get_fasta())
outfile.close()
os.remove(DIR+alignment+"-pht")
return cleaned | 5,356,183 |
def configProject(projectName):
""" read in config file"""
if projectName==None:return
filename=os.path.join(projectsfolder,unicode(projectName),u"project.cfg" ).encode("utf-8")
if projectName not in projects:
print 'Content-type: text/plain\n\n',"error in projects:",type(projectName),"projectName:",[projectName]
print projects
return
if os.path.exists(filename):
try:
config = ConfigObj(filename,encoding="UTF-8")
#config.BOM=True
if verbose : print "read", filename
except Exception, e:
if verbose : print "can't read config file:",filename,e
return
return readinContent(config,projectName) | 5,356,184 |
def bitwise_not(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The BitwiseNot operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
:param cellsize_type: one of "FirstOf", "MinOf", "MaxOf, "MeanOf", "LastOf"
:param astype: output pixel type
:return: the output raster
"""
return local(rasters, 13, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype) | 5,356,185 |
def keyclean(key):
"""
Default way to clean table headers so they make good
dictionary keys.
"""
clean = re.sub(r'\s+', '_', key.strip())
clean = re.sub(r'[^\w]', '', clean)
return clean | 5,356,186 |
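Two illustrative header strings (hypothetical inputs):
assert keyclean("First Name") == "First_Name"
assert keyclean("  Total Sales ($) ") == "Total_Sales_"  # non-word characters stripped after whitespace is collapsed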
def test_compute_ts_map_downsampled(input_dataset):
"""Minimal test of compute_ts_image"""
spatial_model = GaussianSpatialModel(sigma="0.11 deg")
spectral_model = PowerLawSpectralModel(index=2)
model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)
ts_estimator = TSMapEstimator(
model=model,
downsampling_factor=2,
kernel_width="1 deg",
selection_optional=["ul"]
)
result = ts_estimator.run(input_dataset)
assert_allclose(result["ts"].data[0, 99, 99], 1661.49, rtol=1e-2)
assert_allclose(result["niter"].data[0, 99, 99], 7)
assert_allclose(result["flux"].data[0, 99, 99], 1.065988e-09, rtol=1e-2)
assert_allclose(result["flux_err"].data[0, 99, 99], 4.005628e-11, rtol=1e-2)
assert_allclose(result["flux_ul"].data[0, 99, 99], 8.220152e-11, rtol=1e-2)
assert result["flux"].unit == u.Unit("cm-2s-1")
assert result["flux_err"].unit == u.Unit("cm-2s-1")
assert result["flux_ul"].unit == u.Unit("cm-2s-1")
# Check mask is correctly taken into account
assert np.isnan(result["ts"].data[0, 30, 40]) | 5,356,187 |
def get_rfactors_for_each(lpin):
"""
R-FACTORS FOR INTENSITIES OF DATA SET /isilon/users/target/target/Iwata/_proc_ox2r/150415-hirata/1010/06/DS/multi011_1-5/XDS_ASCII_fullres.HKL
RESOLUTION R-FACTOR R-FACTOR COMPARED
LIMIT observed expected
5.84 60.4% 50.1% 174
4.13 58.1% 51.5% 310
3.38 60.0% 54.6% 410
2.92 90.3% 76.1% 483
2.62 130.4% 100.3% 523
2.39 241.1% 180.5% 612
2.21 353.9% 277.9% 634
2.07 541.1% 444.0% 673
1.95 -99.9% -99.9% 535
total 84.5% 71.2% 4354
"""
read_flag = False
filename = None
ret = collections.OrderedDict() # {filename: list of [dmin, Robs, Rexpt, Compared]}
for l in open(lpin):
if "R-FACTORS FOR INTENSITIES OF DATA SET" in l:
filename = l.strip().split()[-1]
elif "LIMIT observed expected" in l:
read_flag = True
elif read_flag:
sp = l.strip().replace("%","").split()
if len(sp) == 4:
dmin, robs, rexp, compared = sp
if dmin != "total": dmin = float(dmin)
else: dmin, read_flag = None, False
robs, rexp = map(float, (robs, rexp))
compared = int(compared)
ret.setdefault(filename, []).append([dmin, robs, rexp, compared])
return ret | 5,356,188 |
def value_left(self, right):
"""
Returns the value of the right type instance to use in an
operator method, namely when the method's instance is on the
left side of the expression.
"""
return right.value if isinstance(right, self.__class__) else right | 5,356,189 |
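A sketch of how such a helper might be mixed into a value class (the Dollars class is hypothetical, not from the source):
class Dollars:
    value_left = value_left              # reuse the module-level helper as a method
    def __init__(self, value):
        self.value = value
    def __add__(self, right):
        return Dollars(self.value + self.value_left(right))

assert (Dollars(5) + Dollars(3)).value == 8   # right operand unwrapped via .value
assert (Dollars(5) + 2).value == 7            # plain numbers pass through unchanged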
def correct_throughput(inspec, spFile='BT-Settl_Asplund2009.fits', quiet=False):
"""
Main function
Inputs:
inspec - list of input spectra, each list item should
                 be a 3xN array of wavelengths (in microns),
flux, and variance. One list item for each
order for orders 71-77
spFile - (optional) path to fits file containing
                 BT-Settl grid, default: BT-Settl_Asplund2009.fits
quiet - set True to turn off all printed output
Returns:
wave - wavelength array of final combined spectrum
flam - flux array
fvar - variance array
"""
## Read in synthetic spectrum grid
spgrid, spwave, spaxes = readGrid(spFile)
## Parse input spectrum
waves, flams, fvars = parseSpec(inspec, spwave)
## Define cheby grid
norder, npix = waves.shape
chebx = np.linspace(-1,1,npix)
## Initial guesses
## Polynomial to correct for blaze function
nbpoly = 3
bpolys = np.zeros((norder, nbpoly+1))
## Polynomial to correct wavelength
nwpoly = 1
wpolys = np.zeros((norder, nwpoly+1))
wpolys[:,0] = 1.0
for i in range(norder):
bpolys[i] = chebfit(chebx, 1./flams[i], nbpoly)
rv = getrv(waves[i], flams[i]*chebval(chebx,bpolys[i]), spwave, spgrid[:,9,2])
wpolys[i,0] = (1.+rv/3e5)
## Model parameters
teff = 3500
mh = 0.0
ips = np.array([np.hstack((bpolys[i],wpolys[i])) for i in range(norder)])
## Loop over entire model grid and fit for each order
chi2s = np.zeros([norder,spgrid.shape[1],spgrid.shape[2]])
chi2s [:] = 9e9
ps = np.tile(np.zeros_like(ips[0]), [norder,spgrid.shape[1],spgrid.shape[2],1])
for k in range(0, spgrid.shape[1]):
for l in range(spgrid.shape[2]):
if not quiet:
print('Teff = {0}, [M/H] = {1}'.format(spaxes[0][k],spaxes[1][l]))
for i in range(norder):
flam = flams[i]
fvar = fvars[i]
wave = waves[i]
fit = minimize(fitFun, ips[i], args=(wave,flam,fvar,nbpoly,chebx,spwave,spgrid,k,l))
chi2s[i,k,l] = fit['fun']
ps[i,k,l] = fit['x']
#if not quiet:
# print(' '+fit['message'])
# print(' '+str(fit['x']))
# print(' '+str(fit['fun']))
# print()
if not quiet:
print(np.mean(chi2s[:,k,l]))
mink, minl = np.unravel_index(np.argmin(np.sum(chi2s,0)),[len(spaxes[0]),len(spaxes[1])])
bpolys, wpolys = np.split(ps[:,mink,minl], [nbpoly+1], axis=1)
teff = spaxes[0][mink]
mh = spaxes[1][minl]
## Correct everything
corrwaves = np.zeros_like(waves)
corrflams = np.zeros_like(flams)
corrfvars = np.zeros_like(fvars)
for i in range(norder):
corrwaves[i] = waves[i] * chebval(chebx, wpolys[i])
corrflams[i] = flams[i] * chebval(chebx, bpolys[i])
corrfvars[i] = (np.sqrt(fvars[i]) * chebval(chebx, bpolys[i]))**2.
## Flatten and sort
wave = corrwaves.flatten()
srt = np.argsort(wave)
wave = wave[srt]
flam = corrflams.flatten()[srt]
fvar = corrfvars.flatten()[srt]
return wave, flam, fvar | 5,356,190 |
def convert_list(
items,
ids,
parent,
attr_type,
):
"""Converts a list into an XML string."""
LOG.info('Inside convert_list()')
output = []
addline = output.append
if ids:
this_id = get_unique_id(parent)
for (i, item) in enumerate(items):
LOG.info('Looping inside convert_list(): item="%s", type="%s"'
% (unicode_me(item), type(item).__name__))
attr = ({} if not ids else {'id': '%s_%s' % (this_id, i + 1)})
if isinstance(item, numbers.Number) or type(item) in (str,
unicode):
addline(convert_kv('item', item, attr_type, attr))
elif hasattr(item, 'isoformat'):
# datetime
addline(convert_kv('item', item.isoformat(), attr_type,
attr))
elif type(item) == bool:
addline(convert_bool('item', item, attr_type, attr))
elif isinstance(item, dict):
if not attr_type:
addline('<item>%s</item>' % convert_dict(item, ids,
parent, attr_type))
else:
addline('<item type="dict">%s</item>'
% convert_dict(item, ids, parent, attr_type))
elif isinstance(item, collections.Iterable):
if not attr_type:
addline('<item %s>%s</item>' % (make_attrstring(attr),
convert_list(item,
ids,
'item',
attr_type)))
else:
addline('<item type="list"%s>%s</item>'
% (make_attrstring(attr), convert_list(item,
ids,
'item',
attr_type)))
elif item is None:
addline(convert_none('item', None, attr_type, attr))
else:
            raise TypeError('Unsupported data '
                            'type: %s (%s)' % (item, type(item).__name__))
return ''.join(output) | 5,356,191 |
def ReduceDureeEtat(id_individu):
"""Rรฉduit d'un jour la durรฉe restante de l'รฉtat d'un individu"""
pop_cur.execute("UPDATE etat SET duree_etat = duree_etat - 1 WHERE id_individu = ?", (int(id_individu), )) | 5,356,192 |
def get_mid_surface(in_surfaces):
"""get_mid_surface gives the mid surface when dealing with the 7 different surfaces
Args:
(list of strings) in_surfaces : List of path to the 7 different surfaces generated by mris_expand
Returns:
(string) Path to the mid surface
"""
return in_surfaces[3] | 5,356,193 |
def parse_type(msg_type):
"""
Parse ROS message field type
:param msg_type: ROS field type, ``str``
:returns: base_type, is_array, array_length, ``(str, bool, int)``
:raises: :exc:`ValueError` If *msg_type* cannot be parsed
"""
if not msg_type:
raise ValueError("Invalid empty type")
if '[' in msg_type:
var_length = msg_type.endswith('[]')
splits = msg_type.split('[')
if len(splits) > 2:
raise ValueError("Currently only support 1-dimensional array types: %s"%msg_type)
if var_length:
return msg_type[:-2], True, None
else:
try:
length = int(splits[1][:-1])
return splits[0], True, length
except ValueError:
raise ValueError("Invalid array dimension: [%s]"%splits[1][:-1])
else:
return msg_type, False, None | 5,356,194 |
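Illustrative calls (the field types are examples only):
assert parse_type("int32") == ("int32", False, None)
assert parse_type("float64[]") == ("float64", True, None)
assert parse_type("geometry_msgs/Point[8]") == ("geometry_msgs/Point", True, 8)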
def preprocess(feature_modules: List, queries: List[Query],
prefix: Optional[str] = None,
process_count: Optional[int] = None):
"""
Args:
feature_modules: the feature modules used to generate features, each must implement the add_features function
        queries: all the query objects that have to be preprocessed
        prefix: prefix for the output files, "preprocessed-data" by default
process_count: how many subprocesses will I run simultaneously, by default takes all available cpu cores.
"""
if process_count is None:
process_count = cpu_count()
if prefix is None:
prefix = "preprocessed-data"
pool_function = partial(_preprocess_one_query, prefix,
[m.__name__ for m in feature_modules])
with Pool(process_count) as pool:
pool.map(pool_function, queries)
output_paths = glob(f"{prefix}-*.hdf5")
return output_paths | 5,356,195 |
def to_distance(maybe_distance_function):
"""
Parameters
----------
maybe_distance_function: either a Callable, which takes two arguments, or
a DistanceFunction instance.
Returns
-------
"""
if maybe_distance_function is None:
return NoDistance()
if isinstance(maybe_distance_function, DistanceFunction):
return maybe_distance_function
return SimpleFunctionDistance(maybe_distance_function) | 5,356,196 |
def echo(text):
"""Return echo function."""
return text | 5,356,197 |
def test_issue3972(en_vocab):
"""Test that the PhraseMatcher returns duplicates for duplicate match IDs.
"""
matcher = PhraseMatcher(en_vocab)
matcher.add("A", None, Doc(en_vocab, words=["New", "York"]))
matcher.add("B", None, Doc(en_vocab, words=["New", "York"]))
doc = Doc(en_vocab, words=["I", "live", "in", "New", "York"])
matches = matcher(doc)
assert len(matches) == 2 | 5,356,198 |
def include_package(config):
"""Pyramid package include"""
# add translations
config.add_translation_dirs('pyams_portal:locales')
# register permissions
config.register_permission({
'id': MANAGE_TEMPLATE_PERMISSION,
'title': _("Manage presentation templates")
})
# register roles
config.register_role({
'id': DESIGNER_ROLE,
'title': _("Designer (role)"),
'permissions': {
PUBLIC_PERMISSION, VIEW_PERMISSION, VIEW_SYSTEM_PERMISSION,
MANAGE_TEMPLATE_PERMISSION
},
'managers': {
ADMIN_USER_ID,
ROLE_ID.format(SYSTEM_ADMIN_ROLE)
}
})
# add portal support to site root
classImplements(BaseSiteRoot, IPortalContext)
try:
import pyams_zmi # pylint: disable=import-outside-toplevel,unused-import
except ImportError:
config.scan(ignore=[re.compile(r'pyams_portal\..*\.zmi\.?.*').search])
else:
config.scan() | 5,356,199 |