content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def gate_settle(gate):
""" Return gate settle times """
return 0 | f452a343550c4f7be2133119c89dc386665921c4 | 3,652,086 |
def fov_gc(lons, lats):
"""Field of view great circle.
    Parameters
----------
lons: [float]
Field of view longitudes (degE).
lats: [float]
Field of view latitudes (degN).
    Returns
    -------
geojson.Feature
GeoJSON field of view polygon.
"""
return geo_polygon(lons, lats, 'Limb', 'Limb field of view', 'blue') | 3013648c04e5626c51995288afd6e441d3afef30 | 3,652,088 |
import ietf.sync.rfceditor
from ietf.doc.templatetags.mail_filters import std_level_prompt
def request_publication(request, name):
"""Request publication by RFC Editor for a document which hasn't
been through the IESG ballot process."""
class PublicationForm(forms.Form):
subject = forms.CharField(max_length=200, required=True)
body = forms.CharField(widget=forms.Textarea, required=True, strip=False)
doc = get_object_or_404(Document, type="draft", name=name, stream__in=("iab", "ise", "irtf"))
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You do not have the necessary permissions to view this page.")
consensus_event = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
m = Message()
m.frm = request.user.person.formatted_email()
(m.to, m.cc) = gather_address_lists('pubreq_rfced',doc=doc).as_strings()
m.by = request.user.person
next_state = State.objects.get(used=True, type="draft-stream-%s" % doc.stream.slug, slug="rfc-edit")
if request.method == 'POST' and not request.POST.get("reset"):
form = PublicationForm(request.POST)
if form.is_valid():
events = []
# start by notifying the RFC Editor
response, error = ietf.sync.rfceditor.post_approved_draft(settings.RFC_EDITOR_SYNC_NOTIFICATION_URL, doc.name)
if error:
return render(request, 'doc/draft/rfceditor_post_approved_draft_failed.html',
dict(name=doc.name,
response=response,
error=error))
m.subject = form.cleaned_data["subject"]
m.body = form.cleaned_data["body"]
m.save()
if doc.group.acronym != "none":
m.related_groups.set([doc.group])
m.related_docs.set([doc])
send_mail_message(request, m)
# IANA copy
(m.to, m.cc) = gather_address_lists('pubreq_rfced_iana',doc=doc).as_strings()
send_mail_message(request, m, extra=extra_automation_headers(doc))
e = DocEvent(doc=doc, type="requested_publication", rev=doc.rev, by=request.user.person)
e.desc = "Sent request for publication to the RFC Editor"
e.save()
events.append(e)
# change state
prev_state = doc.get_state(next_state.type_id)
if next_state != prev_state:
doc.set_state(next_state)
e = add_state_change_event(doc, request.user.person, prev_state, next_state)
if e:
events.append(e)
doc.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
else:
if doc.intended_std_level_id in ("std", "ds", "ps", "bcp"):
action = "Protocol Action"
else:
action = "Document Action"
subject = "%s: '%s' to %s (%s-%s.txt)" % (action, doc.title, std_level_prompt(doc), doc.name, doc.rev)
body = generate_publication_request(request, doc)
form = PublicationForm(initial=dict(subject=subject,
body=body))
return render(request, 'doc/draft/request_publication.html',
dict(form=form,
doc=doc,
message=m,
next_state=next_state,
consensus_filled_in=(
True if (doc.stream_id and doc.stream_id=='ietf')
else (consensus_event != None and consensus_event.consensus != None)),
),
) | d6377b08c5eae6740e98a154d991ba268ed37815 | 3,652,090 |
def strip_trailing_characters(unstripped_string, tail):
"""
Strip the tail from a string.
:param unstripped_string: The string to strip. Ex: "leading"
:param tail: The trail to remove. Ex: "ing"
:return: The stripped string. Ex: "lead"
"""
if unstripped_string.endswith(str(tail)):
        # Remove the tail by slicing from the end, not from the start of the string.
        return unstripped_string[:-len(tail)]
else:
return unstripped_string | dbd09fe9a58b0fb3072a680a9c7ac701257ebfcd | 3,652,091 |
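# A quick check of the snippet above after the slicing fix (example values taken from its docstring):
assert strip_trailing_characters("leading", "ing") == "lead"
assert strip_trailing_characters("lead", "ing") == "lead"  # returned unchanged when the tail is absent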
def is_prime(x):
""" Prove if number is prime """
if x == 0 or x == 1:
return 0
for i in range(2, x//2 +1):
if x % i == 0:
return 0
return 1 | 63980c49b9ea05458ecafe874073805df50ce1d0 | 3,652,092 |
import pickle
def obj_to_str(obj, encoding='utf8') -> str:
"""
Examples:
>>> d = dict(a=1, b=2)
>>> assert isinstance(obj_to_str(d), str)
"""
b = pickle.dumps(obj)
return bytes_to_str(b, encoding=encoding) | 76c87052596aefcbd15a5135379ff2a3512bed77 | 3,652,093 |
def sample_ellipsoid(p0, covmat, size=1):
"""
Produce an ellipsoid of walkers around an initial parameter value,
according to a covariance matrix.
:param p0: The initial parameter value.
:param covmat:
The covariance matrix. Must be symmetric-positive definite or
it will raise the exception numpy.linalg.LinAlgError
:param size: The number of samples to produce.
"""
return np.random.multivariate_normal(
np.atleast_1d(p0), np.atleast_2d(covmat), size=size
) | a09448f29920a7758a549ede80608c8c4dd9892a | 3,652,095 |
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):
"""
Average pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
    :param size: (tuple) The height and width of the pooling window.
    :param stride: (tuple) The stride of the pooling window.
    :param name: (string) Scope name.
    :param padding: (string) Padding scheme, 'VALID' or 'SAME'.
    :return: The pooled tensor; with the default size and stride the input is halved in both width and height (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding, name=name) | b39dfed959f43346c48d13b7e41601999c1b7f8b | 3,652,096 |
from django.contrib.auth import get_user_model
def get_username_field() -> str:
"""Get custom username field.
Returns:
str: username field.
"""
user_model = get_user_model()
return getattr(user_model, "USERNAME_FIELD", "username") | 45dfe6888d8c69e012b98a0edd1b639b7bf56af7 | 3,652,098 |
def get_header(yaml_dict):
"""
Header merely comprises the access token
:return:
"""
headers = {"Authorization": "Bearer {}".format(get_access_token(yaml_dict)),
"Content-Type": "application/json"}
return headers | fb93b304cdb960f1eec7396a92262fde94699126 | 3,652,100 |
def change_filename_extension(filename: str, old_ext: str, new_ext: str) -> str:
"""
Change extension of a filename (e.g. "data.csv" to "data.json").
:param filename: the old filename (including extension)
:param old_ext: the extension of the old filename
:param new_ext: the extension to replace the old extension
:return: a filename with the new extension
"""
dbg.dassert(
filename.endswith(old_ext),
"Extension '%s' doesn't match file '%s'",
old_ext,
filename,
)
    # Remove the old extension by slicing off the suffix (rstrip would strip by character set).
    new_filename = filename[: -len(old_ext)]
# Add the new extension.
new_filename = new_filename + new_ext
return new_filename | acf9d75383fafaeaf6bf42e46ef8d01080661172 | 3,652,101 |
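# A brief usage sketch for change_filename_extension (hypothetical filenames; assumes the
# external dbg.dassert helper is available and its check passes):
print(change_filename_extension("data.csv", ".csv", ".json"))   # -> data.json
print(change_filename_extension("report.txt", ".txt", ".md"))   # -> report.md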
def parseArgPairToBoundaryArray(pair, mesh):
"""
Parse boundary related pair argument to create a list of
[ :gimliapi:`GIMLI::Boundary`, value|callable ].
Parameters
----------
pair: tuple
- [marker, arg]
- [marker, [callable, *kwargs]]
- [marker, [arg_x, arg_y, arg_z]]
- [boundary, arg]
- ['*', arg]
- [node, arg]
- [[marker, ...], arg] (REMOVE ME because of bad design)
- [[boundary,...], arg] (REMOVE ME because of bad design)
- [marker, callable, *kwargs] (REMOVE ME because of bad design)
- [[marker, ...], callable, *kwargs] (REMOVE ME because of bad design)
arg will be parsed by
:py:mod:`pygimli.solver.solver.generateBoundaryValue`
and distributed to each boundary.
Callable functions will be executed at run time.
'*' will be interpreted as all boundary elements with one neighboring cell
mesh: :gimliapi:`GIMLI::Mesh`
Used to find boundaries by marker.
Returns
-------
bc: list()
[:gimliapi:`GIMLI::Boundary`, value|callable]
"""
bc = []
bounds = []
if isinstance(pair[1], list):
# [marker, [callable, *kwargs]]
if callable(pair[1][0]):
pair = [pair[0]] + pair[1]
if pair[0] == '*':
mesh.createNeighborInfos()
for b in mesh.boundaries():
if b.leftCell() is not None and b.rightCell() is None:
bounds.append(b)
elif isinstance(pair[0], int):
bounds = mesh.findBoundaryByMarker(pair[0])
elif isinstance(pair[0], pg.core.Node):
bc.append(pair)
return bc
####### bad Design .. need to remove
elif isinstance(pair[0], list):
print(pair[0], pair[0][0])
pg.deprecated('bad design')
# [[,,..], ]
for b in pair[0]:
for bi in mesh.boundaries(pg.find(mesh.boundaryMarkers() == b)):
bounds.append(bi)
elif isinstance(pair[0], pg.core.stdVectorBounds):
pg.deprecated('bad design')
pg.warn('in use? pair[0], pg.core.stdVectorBounds)')#20200115
bounds = pair[0]
elif isinstance(pair[0], pg.core.Boundary):
pg.warn('in use? isinstance(pair[0], pg.core.Boundary)')#20200115
bc.append(pair)
return bc
####### bad Design .. need to remove
for b in bounds:
val = None
if len(pair) > 2:
val = pair[1:]
else:
val = pair[1]
bc.append([b, val])
# print('-'*50)
# print(b, pair[1], callable(pair[1]))
# print('+'*50)
# if callable(pair[1]):
# # don't execute the callable here
# # we want to call them at runtime
# if len(pair) > 2:
# val = pair[1:]
# else:
# val = pair[1]
# else:
# this will be executed
#val = generateBoundaryValue(b, pair[1])
#print('#'*30)
return bc | e4827117dfa3b1b6683f2af51ed90bd6f2edf170 | 3,652,102 |
def get_niter(outcarfile):
"""
Get the number of ionic steps that were run
Args:
outcarfile (string): full path to OUTCAR file
Returns:
niter (int): number of ionic iterations
"""
with open(outcarfile,'r') as rf:
for line in rf:
if '- Iteration' in line:
niter = line.split('(')[0].split('n')[-1].strip()
niter = int(niter)
return niter | c64a5cc399cabc41a0fc7ca6fee35970c3db0444 | 3,652,103 |
def bucket_contvar(ex, ctrl, num_buckets):
"""
Given ex, which contains a continuous value for a particular control variable,
return the bucketed version of that control value.
Inputs:
ex: message dictionary. Assume it has key ctrl, mapping to the value.
ctrl: string. The name of the CT control.
num_buckets: int. The number of buckets for this control variable.
"""
if ctrl not in ex.keys():
raise ValueError(
"Control %s not found in example. Available keys in "
"this example: %s" % (ctrl, ', '.join(ex.keys()))
)
# Get the control variable value
ctrl_val = ex[ctrl] # string. the value of the control variable for this example
if ctrl == 'avg_nidf':
ctrl_val = float(ctrl_val)
assert ctrl_val >= 0
assert ctrl_val <= 1
elif ctrl == 'lastuttsim':
if ctrl_val == 'None': # bot goes first in conversation
assert num_buckets == 11
return 10 # The last bucket is for when the bot goes first
else:
ctrl_val = float(ctrl_val)
assert ctrl_val >= -1
assert ctrl_val <= 1
else:
raise ValueError('Unexpected CT ctrl: %s' % ctrl)
# Get the bucket lowerbounds
bucket_lbs = CONTROL2BUCKETLBS[(ctrl, num_buckets)] # lst len num_buckets of floats
if ctrl == 'lastuttsim':
# The 'bot goes first' bucket 10 has no lower bound
assert len(bucket_lbs) == num_buckets - 1
else:
assert len(bucket_lbs) == num_buckets
# Determine the correct bucket and return the bucket id
return sort_into_bucket(ctrl_val, bucket_lbs) | 182f9bd01ec81a18b0555afebc40183982d997e9 | 3,652,104 |
def handle_exception(error):
"""
Flask error handler for Exception
Parameters
----------
error : Exception
An Exception error
Returns
-------
string
A JSON string of the Exception error response
"""
response = create_error_response(error)
return response, 500 | 6827d310804a65ff26d6abc036ec60ff94ae4ab7 | 3,652,105 |
import re
def isphone(value, locale='en-US'):
"""
Return whether or not given value is valid mobile number according to given locale. Default locale is 'en-US'.
If the value is valid mobile number, this function returns ``True``, otherwise ``False``.
Supported locales are: ``ar-DZ``, ``ar-SY``, ``ar-SA``, ``en-US``, ``en-CA``, ``cs-CZ``, ``de-DE``, ``da-DK``
``el-GR``, ``en-AU``, ``en-GB``, ``en-HK``, ``zh-HK``, ``en-IN``, ``en-NG``, ``en-NZ``, ``en-ZA``, ``en-ZM``
``es-ES``, ``fi-FI``, ``fr-FR``, ``he-IL``, ``hu-HU``, ``id-ID``, ``it-IT``, ``ja-JP``, ``ms-MY``, ``nb-NO``
``nl-BE``, ``fr-BE``, ``nn-NO``, ``pl-PL``, ``pt-BR``, ``pt-PT``, ``ro-RO``, ``en-PK``, ``ru-RU``, ``sr-RS``
``tr-TR``, ``vi-VN``, ``zh-CN``, ``zh-TW``, ``bn-BD``
Examples::
>>> isphone('+15673628910', 'en-US')
True
>>> isphone('+10345672645', 'en-US')
False
:param value: string to validate mobile number
:param locale: locale of mobile number to validate
"""
phones = {
'ar-DZ': r'^(\+?213|0)(5|6|7)\d{8}$',
'ar-SY': r'^(!?(\+?963)|0)?9\d{8}$',
'ar-SA': r'^(!?(\+?966)|0)?5\d{8}$',
'bn-BD': r'^(\+?88)?(01[56789]\d{2}(\s|\-)?\d{6})$',
'en-US': r'^(\+?1)?[2-9]\d{2}[2-9](?!11)\d{6}$',
'cs-CZ': r'^(\+?420)? ?[1-9][0-9]{2} ?[0-9]{3} ?[0-9]{3}$',
'de-DE': r'^(\+?49[ \.\-])?([\(]{1}[0-9]{1,6}[\)])?([0-9 \.\-\']{3,20})((x|ext|extension)[ ]?[0-9]{1,4})?$',
'da-DK': r'^(\+?45)?(\d{8})$',
'el-GR': r'^(\+?30)?(69\d{8})$',
'en-AU': r'^(\+?61|0)4\d{8}$',
'en-GB': r'^(\+?44|0)7\d{9}$',
'en-HK': r'^(\+?852\-?)?[569]\d{3}\-?\d{4}$',
'en-IN': r'^(\+?91|0)?[789]\d{9}$',
'en-NG': r'^(\+?234|0)?[789]\d{9}$',
'en-NZ': r'^(\+?64|0)2\d{7,9}$',
'en-ZA': r'^(\+?27|0)\d{9}$',
'en-ZM': r'^(\+?26)?09[567]\d{7}$',
'es-ES': r'^(\+?34)?(6\d{1}|7[1234])\d{7}$',
'fi-FI': r'^(\+?358|0)\s?(4(0|1|2|4|5)?|50)\s?(\d\s?){4,8}\d$',
'fr-FR': r'^(\+?33|0)[67]\d{8}$',
'he-IL': r'^(\+972|0)([23489]|5[0248]|77)[1-9]\d{6}',
'hu-HU': r'^(\+?36)(20|30|70)\d{7}$',
'id-ID': r'^(\+?62|0[1-9])[\s|\d]+$',
'it-IT': r'^(\+?39)?\s?3\d{2} ?\d{6,7}$',
'ja-JP': r'^(\+?81|0)\d{1,4}[ \-]?\d{1,4}[ \-]?\d{4}$',
'ms-MY': r'^(\+?6?01){1}(([145]{1}(\-|\s)?\d{7,8})|([236789]{1}(\s|\-)?\d{7}))$',
'nb-NO': r'^(\+?47)?[49]\d{7}$',
'nl-BE': r'^(\+?32|0)4?\d{8}$',
'nn-NO': r'^(\+?47)?[49]\d{7}$',
'pl-PL': r'^(\+?48)? ?[5-8]\d ?\d{3} ?\d{2} ?\d{2}$',
'pt-BR': r'^(\+?55|0)\-?[1-9]{2}\-?[2-9]{1}\d{3,4}\-?\d{4}$',
'pt-PT': r'^(\+?351)?9[1236]\d{7}$',
'ro-RO': r'^(\+?4?0)\s?7\d{2}(\'|\s|\.|\-)?\d{3}(\s|\.|\-)?\d{3}$',
'en-PK': r'^((\+92)|(0092))-{0,1}\d{3}-{0,1}\d{7}$|^\d{11}$|^\d{4}-\d{7}$',
'ru-RU': r'^(\+?7|8)?9\d{9}$',
'sr-RS': r'^(\+3816|06)[- \d]{5,9}$',
'tr-TR': r'^(\+?90|0)?5\d{9}$',
'vi-VN': r'^(\+?84|0)?((1(2([0-9])|6([2-9])|88|99))|(9((?!5)[0-9])))([0-9]{7})$',
'zh-CN': r'^(\+?0?86\-?)?1[345789]\d{9}$',
'zh-TW': r'^(\+?886\-?|0)?9\d{8}$'
}
phones['en-CA'] = phones['en-US']
phones['fr-BE'] = phones['nl-BE']
phones['zh-HK'] = phones['en-HK']
loc = phones.get(locale)
if loc is None:
raise ValueError('Please provide a supported locale.')
else:
loc_pattern = re.compile(loc)
return bool(loc_pattern.match(value)) | 2e3de8fb6aad000c21ea560521f81c4e9bf2e090 | 3,652,106 |
def _darken(color):
"""
    Takes a hexadecimal color and makes it a shade darker
    :param color: The hexadecimal color to darken
    :return: A darkened version of the hexadecimal color
"""
# Get the edge color
darker = "#"
hex1 = color[1:3]
hex2 = color[3:5]
hex3 = color[5:7]
for val in [hex1, hex2, hex3]:
if val == "00":
darker += "00"
else:
x = int(val, base=16)
x -= int("11", base=16)
            x = format(x, "02X")  # zero-pad so channels below 0x10 still yield two hex digits
            darker += x
return darker | 5b43785572f9685906e73f4bf856cf4d693f6411 | 3,652,107 |
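# A worked example of the channel arithmetic in _darken (illustrative values):
# 0xAA - 0x11 = 0x99, 0xBB - 0x11 = 0xAA, 0xCC - 0x11 = 0xBB
assert _darken("#AABBCC") == "#99AABB"
assert _darken("#00BBCC") == "#00AABB"   # a "00" channel is left untouched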
from datetime import datetime, timedelta, timezone
def commit_datetime(author_time: str, author_tz: str):
"""
Convert a commit's timestamp to an aware datetime object.
Args:
author_time: Unix timestamp string
author_tz: string in the format +hhmm
Returns:
datetime.datetime object with tzinfo
"""
# timezone info looks like +hhmm or -hhmm
tz_hours = int(author_tz[:3])
th_minutes = int(author_tz[0] + author_tz[3:])
return datetime.fromtimestamp(
int(author_time), timezone(timedelta(hours=tz_hours, minutes=th_minutes))
) | d44f7a693ad3c3a6efe97be3707d14b5514bf65e | 3,652,108 |
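# Usage sketch for commit_datetime with an illustrative timestamp/offset:
# 1609459200 is 2021-01-01 00:00:00 UTC, so a "+0530" offset yields 05:30 local time.
print(commit_datetime("1609459200", "+0530"))   # 2021-01-01 05:30:00+05:30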
def flatten_acfg_list(acfg_list):
"""
Returns a new config where subconfig params are prefixed by subconfig keys
"""
flat_acfg_list = []
for acfg in acfg_list:
flat_dict = {
prefix + '_' + key: val
for prefix, subdict in acfg.items()
for key, val in subdict.items()
}
flat_acfg_list.append(flat_dict)
return flat_acfg_list | ae586bc49ee31db022f388492acbbf5e8d02b09d | 3,652,109 |
def happy_birthday(name: hug.types.text, age: hug.types.number):
"""Says happy birthday to a user"""
return "Happy {0} Birthday {1}!".format(name, age) | 84cf051205db60566bd4fcd07c0d8f31f01c65cb | 3,652,110 |
def _format_d10_singlecell(row):
"""
Format the D10 input data for a single cell (corresponds to a single row
in the input csv file).
"""
nlayers = int(row['nlayer'])
if nlayers == 0:
# This means this cell cannot be run in HELP.
return None
try:
title = str(int(row['cid']))
except ValueError:
title = str(row['cid'])
iu10 = 2
ipre = 0
irun = 1
osno = 0 # initial snow water
area = 6.25 # area projected on horizontal plane
frunof = 100
runof = float(row['CN'])
d10dat = []
# READ (10, 5070) TITLE
# 5070 FORMAT(A60)
d10dat.append(['{0:<60}'.format(title)])
# READ (10, 5080) IU10, IPRE, OSNO, AREA, FRUNOF, IRUN
# 5080 FORMAT(I2,I2,2F10.0,F6.0,I2)
d10dat.append(['{0:>2}'.format(iu10) +
'{0:>2}'.format(ipre) +
'{0:>10.0f}'.format(osno) +
'{0:>10.0f}'.format(area) +
'{0:>6.0f}'.format(frunof) +
'{0:>2}'.format(irun)])
# IF (IRUN .EQ. 1) READ (10, 5090) CN2
# 5090 FORMAT(F7.0)
d10dat.append(['{0:>7.0f}'.format(runof)])
# Format the layer properties.
for i in range(nlayers):
lay = str(i+1)
layer = int(row['lay_type'+lay])
thick = max(float(row['thick'+lay]), MINTHICK)
isoil = 0
poro = float(row['poro'+lay])
fc = float(row['fc'+lay])
wp = float(row['wp'+lay])
sw = ''
rc = float(row['ksat'+lay])
xleng = float(row['dist_dr'+lay])
slope = float(row['slope'+lay])
# Check that all values are valid for the layer.
check = [val == -9999 for val in
(thick, poro, fc, wp, rc, xleng, slope)]
if any(check):
return None
# READ (10, 5120) LAYER (J), THICK (J), ISOIL (J),
# PORO (J), FC (J), WP (J), SW (J), RC (J)
# 5120 FORMAT(I2,F7.0,I4,4F6.0,F16.0)
d10dat.append(['{0:>2}'.format(layer) +
'{0:>7.0f}'.format(thick) +
'{0:>4}'.format(isoil) +
'{0:>6.3f}'.format(poro) +
'{0:>6.3f}'.format(fc) +
'{0:>6.3f}'.format(wp) +
'{0:>6}'.format(sw) +
'{0:>16.14f}'.format(rc)])
recir = subin = phole = defec = ipq = trans = ''
layr = 0
# READ (10, 5130) XLENG (J), SLOPE (J), RECIR (J), LAYR (J),
# SUBIN (J), PHOLE (J), DEFEC (J), IPQ (J), TRANS (J)
# 5130 FORMAT(F7.0,2F6.0,I3,F13.0,2F7.0,I2,G14.6)
d10dat.append(['{0:>7.0f}'.format(xleng) +
'{0:>6.2f}'.format(slope) +
'{0:>6}'.format(recir) +
'{0:>3}'.format(layr) +
'{0:>13}'.format(subin) +
'{0:>7}'.format(phole) +
'{0:>7}'.format(defec) +
'{0:>2}'.format(ipq) +
'{0:>14}'.format(trans)])
return d10dat | 567fa7e8582174d6aa1b3ce77441039ecae8c6cf | 3,652,111 |
def logical_and(x, y, out=None, name=None):
"""
:alias_main: paddle.logical_and
:alias: paddle.logical_and,paddle.tensor.logical_and,paddle.tensor.logic.logical_and
:old_api: paddle.fluid.layers.logical_and
logical_and Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \land Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_and(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_and(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, False], [False, False]]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True) | df0f111f7acb6aafa47cf2a99e6bc45c91e77e08 | 3,652,112 |
def _create_tileZeros():
""" Create a function mapping to the Scala implementation."""
def _(cols, rows, cellType = 'float64'):
jfcn = RFContext.active().lookup('tile_zeros')
return Column(jfcn(cols, rows, cellType))
_.__name__ = 'tile_zeros'
_.__doc__ = "Create column of constant tiles of zero"
_.__module__ = THIS_MODULE
return _ | 94dc69bddf1359a2c4645e17a2546920f0c05925 | 3,652,113 |
import pkgutil
def load_modules():
"""
Dynamically loads all the modules in the modules folder and sorts
them by the PRIORITY key. If no PRIORITY is defined for a given
module, a priority of 0 is assumed.
"""
# logger = logging.getLogger(__name__)
locations = [marvin.support.path.PLUGIN_PATH]
modules = []
for finder, name, ispkg in pkgutil.walk_packages(locations):
try:
loader = finder.find_module(name)
mod = loader.load_module(name)
except:
Log.warn("Skipped loading module '{0}' due to an error.", name)
else:
# if hasattr(mod, 'WORDS'):
modules.append(mod)
# else:
# Log.warn("Skipped loading module '{0}' because it misses " +
# "the WORDS constant.", name)
modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY')
else 0, reverse=True)
return modules | b8232e1663da6b062750cc1261bac229ea945539 | 3,652,114 |
def property_fragment():
"""Builds and returns a random Property init fragment."""
return _build_property_fragment() | 0db4483c98f2495ad6826818759e54f32a8778c4 | 3,652,116 |
def create_person_node(author):
"""
Parameters
----------
author : dict
author field of JSON file.
Returns
-------
ID : str
Document _id from 'Person' collection.
"""
given = author.get('given', '')
family = author.get('family', '')
ID = search_person(given, family)
if ID == 0:
collec = db.collection('Person')
para = {'URI': given+'_'+family, 'type': subject_type_author, 'sameas': '', 'given': given, 'family': family}
metadata = collec.insert(para)
ID = metadata['_id']
memo_name_ID.update({str((given, family)): ID})
print(ID, "created")
return ID
else:
return ID | bfb7f8f0061ad337257c9e43b5907520b79eb59b | 3,652,117 |
def create_date_list(startDt='2020-11-01', endDt='2020-12-01'):
"""
Create a date list ranging from start to end dates. Date output format = yyyy_mm
:startDt = beginning date for the range
:endDt = end date for the range
To run the current method requires a minimum one month difference between dates
FUTURE: Could provide more of the common date movements e.g. (M, Q, Y), and have these
added to the functionality with a keyword parameter
"""
dates = pd.date_range(startDt, endDt, freq='1M') - pd.offsets.MonthBegin(1)
listDates = [str(x.year)+"_"+str(x.month).zfill(2) for x in dates]
return listDates | 34b4105876e4e1977716bb307cd9536eb37482fb | 3,652,118 |
import torch
def batch_distance_metrics_from_coords(coords, mask):
"""
Given coordinates of neighboring atoms, compute bond
distances and 2-hop distances in local neighborhood
"""
d_mat_mask = mask.unsqueeze(1) * mask.unsqueeze(2)
if coords.dim() == 4:
two_dop_d_mat = torch.square(coords.unsqueeze(1) - coords.unsqueeze(2) + 1e-10).sum(dim=-1).sqrt() * d_mat_mask.unsqueeze(-1)
one_hop_ds = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
elif coords.dim() == 5:
two_dop_d_mat = torch.square(coords.unsqueeze(2) - coords.unsqueeze(3) + 1e-10).sum(dim=-1).sqrt() * d_mat_mask.unsqueeze(-1).unsqueeze(1)
one_hop_ds = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
return one_hop_ds, two_dop_d_mat | e8322310806debbdb4d2f3699eca4355cc9e3ed6 | 3,652,120 |
def ComputeHash256(buf: bytes) -> bytes:
"""ComputeHash256 Compute a cryptographically strong 256 bit hash of the input byte slice."""
return ComputeHash256Array(buf) | 1597d4ea67a4e74970576a1336a8687640aaca25 | 3,652,121 |
def acf_std(x, maxlag=None, periodogram=True,
confidence=0.6826895, simplified=True, acf_cached=None):
"""Computes the approximate standard deviation of the autocorrelation
coefficients.
Parameters
----------
x : ndarray
Input data.
maxlag : {None, int} optional
Maximum lag beyond which the ACF coefficient can be considered as null.
periodogram : {True, False}
Whether to use a periodogram-like estimate of the ACF or not.
confidence : {0.6826895, float} optional
Confidence level. The default value returns the standard deviation.
simplified : {True, False} optional
Whether to use a simplified or more complex approximation.
acf_cached : {ndarray} optional
Pre-computed acf coefficients.
Notes
-----
    When simplified is True, the standard error is computed as:
    \begin{equation}
    var[r_k] \approx \frac{1}{N} \left\{ 1 + 2 \sum_{j=1}^{q} r_{j}^2 \right\}
    \end{equation}
    Otherwise, it is computed as:
    \begin{equation}
    \begin{split}
    var[r_k] &\approx
    \frac{1}{N} \sum_{j=-\infty}^{+\infty} \left\{
    r_{j}^2 + r_{j+k} r_{j-k} - 4 r_{k} r_{j} r_{j-k} + 2 r_{j}^2 r_{k}^2
    \right\} \\
    &= \frac{1}{N} \sum_{j=-\infty}^{+\infty} \left\{
    r_{j}^2 [ 1 + 2 r_{k}^2 ] + r_{j+k} r_{j-k} - 4 r_{k} r_{j} r_{j-k}
    \right\}
    \end{split}
    \end{equation}
References
----------
Hippel & McLeod 1994: Time series modeling.
"""
if acf_cached is None:
acfx = acf(x,periodogram)
else:
acfx = acf_cached
n = x.size
r_i = acfx[:n]
rr_i = (r_i)**2
# Artifically set the ACF coefficients to 0 beyond lag maxlag
if maxlag > 0:
rr_i[maxlag:] = 0
# Compute the variance of the ACF coeffs
if simplified:
var_i = 1 + 2*rr_i.cumsum()
else:
var_i = (1 + 2 * rr_i) * rr_i.sum()
cov_ = np.correlate(r_i,r_i,'full')[n-1:]
var_i[:n//2] = cov_[::2]
var_i -= (4*r_i*cov_)
var_i /= float(n)
var_i[0] = 0
#....
std_i = np.sqrt(var_i)
std_i = np.concatenate([std_i, std_i[n-1:0:-1]])
#....
if confidence < 0.5:
confidence = 1.-confidence
thresh = norm.isf((1.-confidence)/2.)
std_i *= thresh
return std_i | 7720f0f07f901cb3dfbb59a9fdc01afcfb7caf6d | 3,652,122 |
def traverse_map(map, x_step, y_step):
"""
iterates over a "map" (array of strings) starting at the top left until reaching the
bottom of the map. every iteration advances position by <x_step,y_step> and checks if
a tree is hit
returns: the total number of Trees hit
rtype: int
"""
trees_hit = 0
map_depth = len(map)
y_steps = range(0,map_depth,y_step)
for j,step in enumerate(y_steps):
trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0
return trees_hit | 42a21c070d25bfc962fa76f94a90417238057986 | 3,652,123 |
from typing import Optional
def q_to_res(Q: float) -> Optional[float]:
"""
:param Q: Q factor
:return: res, or None if Q < 0.25
"""
res = 1 - 1.25 / (Q + 1)
if res < 0.0:
return None
return res | 98380be0c8fbd3bfd694d7851f35488d74cdd862 | 3,652,124 |
import logging
def list_document_classifier():
"""[Lists Document Classifiers for Text Classification on AWS]
Raises:
error: [description]
Returns:
[list]: [List of Document Classifiers]
"""
try:
logging.info(f"List Document Classifiers")
return client.list_document_classifiers()
except Exception as error:
logging.error(f"{error=}")
raise error | 4efb390fa3ebcd4b1f163db5afdad66d8fdc2cf4 | 3,652,125 |
def id_str_to_bytes(id_str: str) -> bytes:
"""Convert a 40 characters hash into a byte array.
The conversion results in 160 bits of information (20-bytes array). Notice
that this operation is reversible (using `id_bytes_to_str`).
Args:
id_str: Hash string containing 40 characters.
Returns:
bytes: The ID converted to bytes.
"""
return int(id_str, 16).to_bytes(20, byteorder='big') | cd6a702343f1267e17710305f9aed70613feacb3 | 3,652,126 |
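# Round-trip check for id_str_to_bytes (the hash value is simply one of the sha1 strings above):
h = "f452a343550c4f7be2133119c89dc386665921c4"
b = id_str_to_bytes(h)
assert len(b) == 20
assert format(int.from_bytes(b, byteorder='big'), '040x') == h   # reversible, as the docstring notes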
def transform(data):
"""Transform words and tags to ids
"""
new_data = []
unknown_word_count = 0
total_word_count = 0
for words, tags in data:
word_ids = [word_to_ix.get(w, word_to_ix[UNK]) for w in words]
tag_ids = [tag_to_ix.get(t) for t in tags]
new_data.append((word_ids, tag_ids))
# count
total_word_count += len(words)
for w in words:
if w not in word_to_ix:
unknown_word_count += 1
unknown_proportion = unknown_word_count / total_word_count
return new_data, unknown_proportion | 6a9f99521bfe157a16ad9dd4d933a46e25878205 | 3,652,127 |
from typing import Optional
def textarea():
""" Returns a textarea parser.
Example::
...[5]
The number defines the number of rows.
"""
rows = number_enclosed_in('[]')('rows')
textarea = Suppress('...') + Optional(rows)
textarea.setParseAction(tag(type='textarea'))
return textarea | 915d1da239c11a35674ac0f05581afd2fecbd92d | 3,652,128 |
from typing import Optional
def get_agent(agent_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAgentResult:
"""
This data source provides details about a specific Agent resource in Oracle Cloud Infrastructure Database Migration service.
Display the ODMS Agent configuration.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_agent = oci.databasemigration.get_agent(agent_id=oci_database_migration_agent["test_agent"]["id"])
```
:param str agent_id: The OCID of the agent
"""
__args__ = dict()
__args__['agentId'] = agent_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:databasemigration/getAgent:getAgent', __args__, opts=opts, typ=GetAgentResult).value
return AwaitableGetAgentResult(
agent_id=__ret__.agent_id,
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
display_name=__ret__.display_name,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
lifecycle_details=__ret__.lifecycle_details,
public_key=__ret__.public_key,
state=__ret__.state,
stream_id=__ret__.stream_id,
system_tags=__ret__.system_tags,
time_created=__ret__.time_created,
time_updated=__ret__.time_updated,
version=__ret__.version) | 5c364a4572811e46e8ade0c210ce6f7a3d4a025f | 3,652,129 |
def darknet():
"""Darknet-53 classifier.
"""
inputs = Input(shape=(416, 416, 3))
x = darknet_base(inputs)
x = GlobalAveragePooling2D()(x)
x = Dense(1000, activation='softmax')(x)
model = Model(inputs, x)
return model | fd4dc9d0b5e5f6dba1939084669d09f13456133f | 3,652,130 |
def create_process_chain_entry(input_object, python_file_url,
udf_runtime, udf_version, output_object):
"""Create a Actinia command of the process chain that uses t.rast.udf
:param strds_name: The name of the strds
:param python_file_url: The URL to the python file that defines the UDF
:param output_name: The name of the output raster layer
:return: A Actinia process chain description
"""
# rn = randint(0, 1000000)
pc = {"id": "t_rast_udf",
"module": "t.rast.udf",
"inputs": [{"import_descr": {"source": python_file_url,
"type": "file"},
"param": "pyfile",
"value": "$file::my_py_func"},
{"param": "input",
"value": input_object.grass_name()},
{"param": "output",
"value": output_object.grass_name()}]}
return pc | 78a76275a2f1dba30627f1a52acd88d2ce851ccc | 3,652,131 |
import requests
def add_user_to_authorization_domain(auth_domain_name, email, permission):
"""Add group with given permissions to authorization domain."""
# request URL for addUserToGroup
uri = f"https://api.firecloud.org/api/groups/{auth_domain_name}/{permission}/{email}"
# Get access token and and add to headers for requests.
# -H "accept: */*" -H "Authorization: Bearer [token]"
headers = {"Authorization": "Bearer " + get_access_token(), "accept": "*/*"}
# capture response from API and parse out status code
response = requests.put(uri, headers=headers)
status_code = response.status_code
if status_code != 204: # AD update with member fail
print(f"WARNING: Failed to update Authorization Domain, {auth_domain_name}, with group: {email}.")
print("Check output file for error details.")
return False, response.text
# AD update with member success
print(f"Successfully updated Authorization Domain, {auth_domain_name}, with group: {email}.")
return True, None | ac774e5d065c5dd5bb1994333c8893290c129162 | 3,652,132 |
import re
def error_032_link_two_pipes(text):
"""Fix some cases and return (new_text, replacements_count) tuple."""
(text, ignored) = ignore(text, r"\[\[\s*:?\s*{}.*?\]\]".format(IMAGE))
(text, count1) = re.subn(r"\[\[([^|\[\]\n]+)\|\|([^|\[\]\n]+)\]\]", "[[\\1|\\2]]", text)
(text, count2) = re.subn(r"\[\[([^|\[\]\n]+)\|([^|\[\]\n]+)\|\]\]", "[[\\1|\\2]]", text)
text = deignore(text, ignored)
return (text, count1 + count2) | 071a56e8f87fcbbbd423f9754c16b57dd7a90b01 | 3,652,133 |
import random
def define_answer(defined_answer):
"""
ランダムに「正解」を生成する
1桁ずつ、0~15までの乱数を引いて決めていく
count桁目の乱数(digit_kari)を引いた時、count-1桁目までの数字と重複がないかをチェック。
重複がなければ、引いた乱数(digit_kari)をans_list[count]に保存。
重複してたらその桁の乱数を引き直す。
"""
global ans_str #,ans_list
if type(defined_answer) == str and len(defined_answer) == 5:
ans_str = defined_answer
return defined_answer
else:
ans_list = [0, 0, 0, 0, 0]
ans_str = ""
digit_kari = 0
count = 0
check = 0
while count < 5:
if count == 0:
ans_list[count] = random.randint(0,15)
count += 1
else:
digit_kari = random.randint(0,15)
for j in range(count):
if ans_list[j] == digit_kari:
check = -1
if check == 0:
ans_list[count] = digit_kari
count += 1
else:
check = 0
for i in range(5):
ans_str += str(hex(ans_list[i]))[2]
print("answer:"+ans_str) #あらかじめ答えを知りたいときのみ有効化する
return ans_str | fa19b2e28864d4c09458582d6dea80b81b3426f6 | 3,652,134 |
def truncate_dataset_position(filename, joint_type="w", threshold=0.01, directory="./2_smoothed/"):
"""
Truncates dataset **with raw position data** from last zero value before maximum velocity to following zero value.
:param filename: Input filename of position dataset
:param joint_type: Chooses which joint type is used to truncate the whole dataset (w, e, gh)
:param threshold: factor for maximum velocity, every value below threshold*v_max is set to zero.
Threshold=0 uses original dataset.
:param directory: directory of files
:return: new truncated dataset as dataframe, indexes stay the same
"""
dataset = open_dataset_pandas(filename, directory=directory)
dataset_velocity = generate_velocity_dataframe(filename, directory)
[index_left, index_right] = \
find_nearest_minima_from_maximum(dataset_velocity, joint_type=joint_type, threshold=threshold)
truncated_dataset = dataset.truncate(before=index_left, after=index_right)
if len(truncated_dataset) > 150 and threshold < 0.3:
return truncate_dataset_position(filename, joint_type=joint_type, threshold=threshold+0.01, directory=directory)
print(f"{filename};{threshold};{len(truncated_dataset)}")
truncated_dataset = truncated_dataset.reset_index(drop=True)
return truncated_dataset, threshold | 266a02a35f6f07e488629513f7252e16007d38cb | 3,652,136 |
def quadric_errors_representative(bucket):
"""
Quadric errors representative function.
:param bucket: bucket to calculate representative from.
:type bucket: Bucket
:return: bucket's representative vertex coordinates
:rtype: tuple(float, float, float)
"""
A = np.zeros((3, 3))
b = np.zeros((3, 1))
faces_set = set()
faces = []
for vertex in bucket.original_vertices: # type: Polyhedron_3_Vertex_handle
circulator = vertex.vertex_begin() # type: Polyhedron_3_Halfedge_around_vertex_circulator
for i in range(vertex.vertex_degree()):
he = circulator.next() # type: Polyhedron_3_Halfedge_handle
if he.is_border():
continue
f = he.facet()
facet_circulator = f.facet_begin() # type: Polyhedron_3_Halfedge_around_facet_circulator
vertices = []
for j in range(3):
facet_he = facet_circulator.next() # type: Polyhedron_3_Halfedge_handle
vertices.append(tuple([float(x) for x in str(facet_he.vertex().point()).split()]))
triangle_already_added = False
n = len(vertices)
for permutation in [[vertices[i - j] for i in range(n)] for j in range(n)]:
if tuple(permutation) in faces_set:
triangle_already_added = True
break
faces_set.add(tuple(permutation))
if triangle_already_added:
continue
face = []
for v in vertices:
face.append(v)
faces.append(face)
for face in faces:
p1 = np.array(face[0])
p2 = np.array(face[1])
p3 = np.array(face[2])
normal = np.reshape(np.cross((p2 - p1), (p3 - p1)), (3, 1))
normal_norm = norm(normal)
normal /= normal_norm
normal_t = normal.transpose()
dist = np.dot(normal_t, p1)
A += np.dot(normal, normal_t)
b += dist * normal
pinv_A = pinv(A)
representative = np.dot(pinv_A, b)
return tuple([representative[0][0], representative[1][0], representative[2][0]]) | d613c8cdf7acb5add81d879e52a3c127d3731980 | 3,652,137 |
import time
def getMonthTicks(start, end, increment, offset=0):
"""
Create a set of matplotlib compatible ticks and tick labels
for every `increment` month in the range [start, end],
beginning at the month of start + `offset` months.
"""
xLabels = []
xTicks = []
y, m, d = helpers.yearmonthday(start)
def normalize(y, m):
if m > 12:
m -= 12
y += 1
elif m < 0:
m += 12
y -= 1
return y, m
def nextyearmonth(y, m):
m += increment
return normalize(y, m)
y, m = normalize(y, m+offset)
tick = helpers.mktime(y, m)
end = end + C.DAY*120 # Make a few extra months worth.
while True:
xTicks.append(tick)
xLabels.append(time.strftime("%b '%y", time.gmtime(tick)))
y, m = nextyearmonth(y, m)
tick = helpers.mktime(y, m)
if tick > end:
break
return xTicks, xLabels | 398c8542b5cce3614b1efd9306b05c6ce1f6f185 | 3,652,138 |
def entropy_layer(inp, theta, num_samples, sample_init, sample_const, train_vect):
""" Entropy PersLay
WARNING: this function assumes that padding values are zero
"""
bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32)))
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2]
LN = tf.multiply(L, 1. / tf.expand_dims(tf.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1))
entropy_terms = tf.where(LN > 0., -tf.multiply(LN, tf.log(LN)), LN)
return tf.multiply(entropy_terms, 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) )) | d9f1155e576f382abfc467ed8704219a5017260d | 3,652,139 |
def set_version_code(data):
"""
Utility function to set new versionCode
"""
match = version_code_pattern.search(data)
if not match:
raise ValueError('Version code not found')
version_code = int(match.group('value'))
next_version_code = '\g<key> {}'.format(version_code + 1)
return version_code_pattern.sub(next_version_code, data) | f94392a06cbb4cda852ad83c8afcd3bae0603b52 | 3,652,140 |
import logging
from datetime import datetime
def add_resource(label, device_type, address, userid, password, rackid='', rack_location='',
ssh_key=None, offline=False):
""" Add device to the list of devices in the configuration managed
Args:
label: label for device
device_type: type of device from device enumeration
address: IP address of device
userid: string with device userid
password: string with device password (or password for ssh key)
rackid: string identify rack id, if not specified will default to management rack
rack:_location string identifying rack location
ssh_key: ssh key string
offline: Add the resource even if it can't be contacted
Returns:
RC: integer return code
Message: string with message associated with return code
"""
_method_ = 'resource_mgr.add_resource'
label = label.strip()
address = address.strip()
session = persistent_mgr.create_database_session()
if not offline:
ipv4, hostname = _check_address(address)
else:
ipv4 = address
hostname = ""
# adding default hostname for cases where dns doesn't resolve the address
# we need *some* hostname to use on Nagios configuration
if hostname == "":
hostname = address
rc, message = validate_address(ipv4)
if rc != 0:
return rc, message
rc, message = validate_label(label)
if rc != 0:
return rc, message
if not offline:
(validate_ret, device_type, mtm, serialnum, version, architecture) = validate(
ipv4, userid, password, device_type, ssh_key)
if validate_ret != 0:
logging.error(
"%s::failed to add device, validate device(%s) return value(%d).",
_method_, label, validate_ret)
error_message = None
if validate_ret == 1:
error_message = _("Failed to connect the device.")
elif validate_ret == 2:
error_message = _("The userid/password combination is not valid.")
elif validate_ret == 3:
error_message = _("No plugin capable of managing device was found.")
elif validate_ret == 109:
error_message = _("Connect timeout.")
return validate_ret, error_message
else:
if _check_device_exist_by_props(session, device_type, mtm, serialnum):
logging.error("%s::failed to add device, device(machine-type-model=%s, "
"serial-number=%s) is already managed.", _method_, mtm, serialnum)
error_message = _("The device is not added, a device with the same serial number "
"and machine type model is found in the configuration file.")
return 110, error_message
# figure out the rack ID to add the device under
if rackid:
rack = persistent_mgr.get_rack_by_id(session, rackid)
else:
# don't have a rack id. find first the rack and assign it there
try:
racks_info = persistent_mgr.get_all_racks(session)
rack = racks_info[0]
except IndexError:
# No rack exist, create one
rack = Rack()
rack.label = "Default"
persistent_mgr.add_racks(session, [rack])
device_info = Resource()
device_info.rack = rack
device_info.eia_location = rack_location
device_info.machine_type_model = mtm
device_info.serial_number = serialnum
device_info.address = ipv4
device_info.hostname = hostname
device_info.userid = userid
if password and not ssh_key:
device_info.password = persistent_mgr.encrypt_data(password)
device_info.label = label
device_info.resource_type = device_type
device_info.version = version
device_info.architecture = architecture
device_info.status = constants.access_status.SUCCESS.value
device_info.statusTime = datetime.utcnow()
# we are adding the device after validation, set validated.
device_info.validated = True
hooks = _load_inventory_device_plugins()
hook_name = 'unknown' # keeps pylint happy
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.add_device_pre_save(device_info)
except Exception as e:
logging.exception(e)
message = _("Before device was added. Error in plugin (%s): %s") % (hook_name, e)
return 102, message
persistent_mgr.add_devices(session, [device_info])
if ssh_key:
key_info = Key()
key_info.resource = device_info
key_info.type = "RSA"
key_info.value = ssh_key
if password:
key_info.password = persistent_mgr.encrypt_data(password)
persistent_mgr.add_ssh_keys(session, [key_info])
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.add_device_post_save(device_info)
except Exception as e:
logging.exception(e)
message = _("After device was added. Error in plugin (%s): %s") % (hook_name, e)
if not message:
message = _("Added device successfully.")
session.close()
return 0, message | b94dc65fdf864e4f5226d6793702355a9cbe1e46 | 3,652,141 |
def kegg_df_to_smiles(kegg_df, column_name):
"""
Args:
kegg_df : pandas dataframe with SID numbers in the third column
Returns:
kegg_df : modified with a fourth column containing CID and fifth column containing SMILES
unsuccessful_list : list of SIDs for which no CID or SMILES were found
"""
res = []
cid_list = []
unsuccessful_list = []
for i in range(len(kegg_df)):
# cell index of desired SID
sid = kegg_df.loc[i, column_name]
try:
smile_result = sid_to_smiles(sid)[0]
res.append(smile_result)
cid_result = sid_to_smiles(sid)[1]
cid_list.append(cid_result)
except BaseException:
res.append('none')
cid_list.append('none')
unsuccessful_list.append(sid)
pass
kegg_df.insert(0, column='CID', value=cid_list)
# Change this 2 to the number where the smiles column should be
kegg_df.insert(1, column='SMILES', value=res)
# kegg_df.to_csv(r'../datasets/df_cleaned_kegg_with_smiles.csv')
return kegg_df, unsuccessful_list | 09c4f3af98bb287348c20fe0fe7e3ce0eb63e6fa | 3,652,142 |
import sqlite3
async def get_user(user_id: int) -> User:
"""Gets user settings.
Returns
-------
User object
Raises
------
sqlite3.Error if something happened within the database.
exceptions.NoDataFoundError if no user was found.
LookupError if something goes wrong reading the dict.
Also logs all errors to the database.
"""
table = 'settings_user'
function_name = 'get_user'
sql = 'SELECT * FROM settings_user where user_id=?'
try:
cur = ARCHMAGE_DB.cursor()
cur.row_factory = sqlite3.Row
cur.execute(sql, (user_id,))
record = cur.fetchone()
except sqlite3.Error as error:
await log_error(
INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql)
)
raise
if not record:
raise exceptions.NoDataFoundError('User not in database')
try:
user_settings = User(
user_id = record['user_id'],
target_enchant = record['target_enchant'],
)
except Exception as error:
await log_error(
INTERNAL_ERROR_LOOKUP.format(error=error, table=table, function=function_name, record=record)
)
raise LookupError
return user_settings | e754113b0d2b4791c6660f9b4e7122144f1638b6 | 3,652,143 |
def _valid_optimizer_args(cfg_user, logger):
"""
Validates the "optimizer" parameters of a json configuration file used for training.
The function returns False if an error has occurred and True if all settings have passed the check.
:param cfg_user: EasyDict, json configuration file imported as dictionary
:param logger: logger instance
:return: boolean, True if no errors have been detected, False otherwise
"""
error = False
if 'optimizer' in cfg_user:
if not all_keys_known(cfg_user.optimizer, arguments.OPTIMIZER_KEYS, logger):
error = True
if 'name' not in cfg_user.optimizer:
logger.error(f"The optimizer is not specified. Choose among {arguments.OPTIMIZERS} to specify 'name'.\n")
error = True
else:
if cfg_user.optimizer.name not in arguments.OPTIMIZERS:
logger.error(f"Unknown optimizer '{cfg_user.optimizer.name}'. Choose among {arguments.OPTIMIZERS} "
"to specify 'name'.\n")
error = True
if 'learning_rate' in cfg_user.optimizer and cfg_user.optimizer.learning_rate <= 0:
logger.error("Invalid value for the argument 'learning_rate': "
f"{cfg_user.optimizer.learning_rate}. Specify a positive number.\n")
error = True
if 'weight_decay' in cfg_user.optimizer and cfg_user.optimizer.weight_decay <= 0:
logger.error("Invalid value for the argument 'weight_decay': "
f"{cfg_user.optimizer.weight_decay}. Specify a positive number.\n")
error = True
if error:
logger.info('\n')
else:
logger.info('Settings check: ok.\n\n')
return not error | 126d66cc04d7bafc5ef91a9d365cdca34d0fd36a | 3,652,144 |
def delete_host_network(host_id, host_network_id):
"""Delete host network."""
data = _get_request_data()
return utils.make_json_response(
200,
host_api.del_host_network(
host_id, host_network_id, user=current_user, **data
)
) | 047ffb411e8dfbde1a818ba31874280dfa113aa0 | 3,652,145 |
def rrms_error(y: np.array, y_hat: np.array) -> float:
"""
Computes the RRMS error of an estimation.
:param y: true parameters as numpy array
:param y_hat: estimated parameters as numpy array
:return: Frobenius norm of the relative estimation error, as percentage
"""
return fro_error(y, y_hat) / np.linalg.norm(y, ('fro' if len(y.shape) > 1 else 2)) * 100 | 0b534c5b8047bd77a1873a604f3896eb597d7267 | 3,652,146 |
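# Hedged usage sketch for rrms_error; this assumes the external fro_error(y, y_hat) returns
# the Frobenius norm of (y - y_hat), which is not shown in this row.
y = np.array([[1.0, 2.0], [3.0, 4.0]])
y_hat = np.array([[1.1, 2.0], [3.0, 3.9]])
# Under that assumption: np.linalg.norm(y - y_hat, 'fro') / np.linalg.norm(y, 'fro') * 100 ≈ 2.58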
import locale
def create():
"""Creates new quiz and stores information about it in database."""
if request.method == "GET":
return render_template("quizzes/create.html")
error = None
questions = []
quiz_name = None
if isinstance(request.json, dict):
for quiz in request.json:
quiz_name = quiz
for question_text in request.json[quiz_name]:
question_options = request.json[quiz_name][question_text]
questions.append(Question(text=question_text, options=question_options))
else:
error = locale.error_wrong_data
new_quiz = Quiz(author_id=g.user["id"], name=quiz_name, questions=questions)
errors = new_quiz.validate()
if error or errors:
error_msg = "\n".join(filter(None, [error, *errors]))
return jsonify(error=error_msg)
else:
db = get_db()
new_quiz.add_to_db()
db.commit()
return jsonify(result="success", url=redirect(url_for("quizzes.index")).headers["Location"]) | 002915c6e8c766053623bf70ab66225ddfdc0883 | 3,652,147 |
from typing import Dict
def get_teams_from_account(client: FrameioClient) -> Dict:
"""
Builds a list of teams for the account. Note: the API offers two strategies to fetch an account's teams,
`'get_teams`` and `get_all_teams`. Using `get_teams`, we'll pull only the teams owned by the account_id,
disregarding teams the user belongs to but does not own. More info: https://docs.frame.io/docs/directory-lists-and-file-trees#2-fetch-the-accounts-teams
"""
acct = client.users.get_me()
acct_id = acct["account_id"]
team_name_kv = dict()
for team in client.teams.list(acct_id):
team_name_kv[team["id"]] = team["name"]
return team_name_kv | ebbc73e2aad3f6a0833dad469a8b952cc8eef21b | 3,652,148 |
def verify_mfib_vrf_hardware_rate(
device, vrf, num_of_igmp_groups, var, rate_pps, max_time=60, check_interval=10):
"""Verify mfib vrf hardware rate
Args:
device ('obj'): Device object
neighbors (`list`): neighbors to be verified
max_time (`int`, optional): Max time, default: 30
check_interval (`int`, optional): Check interval, default: 10
"""
res = True
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
output=device.parse("show ip mfib vrf {vrf} active".format(vrf=vrf))
except SubCommandFailure as e:
timeout.sleep()
continue
ip_list,hd_rt=[],0
##Verify wether the ips learnet have expected harware rate or not
for ip in output.q.get_values('groups'):
hd_rt=output.q.contains(ip).get_values('hw_rate_utilized')[0]
rate1 = int(rate_pps)/int(num_of_igmp_groups)
max_r = int(rate1)+int(var)
min_r = int(rate1)-int(var)
if hd_rt>= min_r and hd_rt<=max_r:
ip_list.append(ip)
else:
log.error("The ip {ip} has unexpected hardware rate {hd_rt}, while expected should be between {min_r} and {max_r}".format(ip=ip, hd_rt=hd_rt, min_r=min_r, max_r=max_r))
res = False
if res:
ips=",".join(ip_list)
log.info("ip {ip_list} have expected hardware rate {hd_rt}".format(ip_list=ips,hd_rt=hd_rt))
return True
timeout.sleep()
return res | 402c178acff9d58fec01e96efc08d205a5ff9c5e | 3,652,149 |
import math
def sieve(n):
"""
Returns a list with all prime numbers up to n.
>>> sieve(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> sieve(25)
[2, 3, 5, 7, 11, 13, 17, 19, 23]
>>> sieve(10)
[2, 3, 5, 7]
>>> sieve(9)
[2, 3, 5, 7]
>>> sieve(2)
[2]
>>> sieve(1)
[]
"""
l = [True] * (n + 1) # noqa: E741
prime = []
start = 2
end = int(math.sqrt(n))
while start <= end:
# If start is a prime
if l[start] is True:
prime.append(start)
# Set multiples of start be False
for i in range(start * start, n + 1, start):
if l[i] is True:
l[i] = False
start += 1
for j in range(end + 1, n + 1):
if l[j] is True:
prime.append(j)
return prime | f6c930c604839ba1872bd3168c76b353606ee8ee | 3,652,150 |
def is_trueish(expression: str) -> bool:
"""True if string and "True", "Yes", "On" (ignorecase), False otherwise"""
expression = str(expression).strip().lower()
return expression in {'true', 'yes', 'on'} | 7d958c068281deb68de7665dc1eeb07acf5e941f | 3,652,151 |
def u_onequbit_h(qc: qiskit.QuantumCircuit, thetas, wire: int):
"""Return a simple series of 1 qubit - gate which is measured in X-basis
Args:
- qc (QuantumCircuit): Init circuit
- thetas (Numpy array): Parameters
- wire (Int): position that the gate carries on
Returns:
- QuantumCircuit: The circuit which have added gates
"""
if isinstance(wire, int) != True:
wire = (wire['wire'])
qc.rz(thetas[0], wire)
qc.rx(thetas[1], wire)
qc.rz(thetas[2], wire)
qc.h(wire)
return qc | 3bcb5e9bb61abe8eb0d150ba539f2b21d4089589 | 3,652,152 |
def get_manager() -> ArchiveManager:
"""
Returns the object storage manager for the archive subsys
:return:
"""
global _manager_singleton
if _manager_singleton is None:
raise Exception("Not initialized. Call init_archive_manager")
return _manager_singleton | c43edc20af5b3e9a18442cfbb4bdefa7e7442d1d | 3,652,153 |
def require_password_and_profile_via_email(
strategy, backend, user=None, flow=None, current_partial=None, *args, **kwargs
): # pylint: disable=unused-argument
"""
Sets a new user's password and profile
Args:
strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
user (User): the current user
flow (str): the type of flow (login or register)
current_partial (Partial): the partial for the step in the pipeline
Raises:
RequirePasswordAndProfileException: if the user hasn't set password or name
"""
if backend.name != EmailAuth.name or flow != SocialAuthState.FLOW_REGISTER:
return {}
data = strategy.request_data()
profile = user.profile
with transaction.atomic():
if "name" in data:
profile = profile_api.ensure_profile(user, {"name": data["name"]})
if "password" in data:
user.set_password(data["password"])
user.save()
if not user.password or not profile.name:
raise RequirePasswordAndProfileException(backend, current_partial)
return {"user": user, "profile": profile or user.profile} | 63d7d6e61696e5a48d297b8911f1b48cc5c82e5e | 3,652,154 |
import copy
def plot_feat_barplot(feat_data: pd.DataFrame,
top_x_feats: int = 15,
plot_features: dict = None
):
"""Plots local feature explanations
Parameters
----------
feat_data: pd.DataFrame
Feature explanations
top_x_feats: int
The number of feature to display.
plot_features: dict
Dict containing mapping between model features and display features
"""
feat_data = copy.deepcopy(feat_data)
if plot_features:
plot_features['Pruned Events'] = 'Pruned Events'
feat_data['Feature'] = feat_data['Feature'].apply(lambda x: plot_features[x])
feat_data['sort_col'] = feat_data['Shapley Value'].apply(lambda x: abs(x))
if top_x_feats is not None and feat_data.shape[0] > top_x_feats:
sorted_df = feat_data.sort_values('sort_col', ascending=False)
cutoff_contribution = abs(sorted_df.iloc[4]['Shapley Value'])
feat_data = feat_data[np.logical_or(feat_data['Explanation'] >= cutoff_contribution, feat_data['Explanation'] <= -cutoff_contribution)]
a = alt.Chart(feat_data).mark_bar(size=15, thickness=1).encode(
y=alt.Y("Feature", axis=alt.Axis(title="Feature", labelFontSize=15,
titleFontSize=15, titleX=-61),
sort=alt.SortField(field='sort_col', order='descending')),
x=alt.X('Shapley Value', axis=alt.Axis(grid=True, title="Shapley Value",
labelFontSize=15, titleFontSize=15),
scale=alt.Scale(domain=[-0.1, 0.4])),
)
line = alt.Chart(pd.DataFrame({'x': [0]})).mark_rule(
color='#798184').encode(x='x')
feature_plot = (a + line).properties(
width=190,
height=225
)
return feature_plot | 4fd976f4846163a97690143da18a8e235d1b940c | 3,652,155 |
async def post_ir_remote_key(device_id: str, remote_id: str, payload: dict) -> dict:
# fmt: off
"""
Trigger key / code on the remote bound to IR device. There are 2 types of keys on Tuya IR
devices:
* native - out of the box keys, provided with remotes for different brands
* custom - DIY keys learned by the IR device
Body for "custom" key (e.g. DIY):
{
"type": "custom",
"code": "<value>"
}
Body for "native" key:
{
"type": "custom",
"key": "<value>"
}
:param device_id: Unique id of the Tuya device (in Tuya's API it is called 'infrared_id')
:param remote_id: Unique remote id bound to the IR device, returned by 'get_ir_device_remotes'.
:param payload: Request body in JSON format
:return: Dictionary with HTTP response
"""
# fmt: on
url = _url_format(id=device_id, endpoint="remotes", url=IR_URL)
# {"code" : <value>} for "custom", {"key": <value>} for "native"
if payload.get("type", "native") == "custom":
lc_url = f"{url}/{remote_id}/learning-codes"
response = _request(url=lc_url, payload=payload)
else:
k_url = f"{url}/{remote_id}/command"
response = _request(url=k_url, payload=payload)
return response | 9c0fb945076c49564fa65f48971e0df0b2131794 | 3,652,156 |
import requests
import traceback
def login():
"""
The function for the front-end client to log in.
Use the following command to test:
$ curl -d '{"custom_id":"id"}' -H "Content-Type: application/json" -X POST http://0.0.0.0:5000/login/
Parameters
----------
google_id_token : str
The token obtained from the Google Sign-In API.
client_id : str
The client ID string returned by the Google Analytics tracker or created by the front-end client.
Returns
-------
user_token : str
The encoded JWT that stores user information.
"""
client_id = None
request_json = request.get_json()
if request_json is not None:
if "google_id_token" in request_json:
# google_id_token is obtained from the Google Sign-In API
google_id_token = request_json["google_id_token"]
# Verify the google_id_token using Google Sign-In API
try:
id_info = id_token.verify_oauth2_token(google_id_token,
requests.Request(), config.GOOGLE_SIGNIN_CLIENT_ID)
# Token is valid
client_id = "google.%s" % id_info["sub"]
except ValueError:
traceback.print_exc()
e = InvalidUsage("Invalid Google ID token.", status_code=401)
return handle_invalid_usage(e)
except:
traceback.print_exc()
e = InvalidUsage(traceback.format_exc(), status_code=401)
return handle_invalid_usage(e)
else:
if "client_id" in request_json:
# obtained from the Google Analytics tracker or created by the front-end client
client_id = request_json["client_id"]
# Get user id by client id, and issued an user jwt
if client_id is None:
e = InvalidUsage("Must have either 'google_id_token' or 'client_id'.", status_code=400)
return handle_invalid_usage(e)
else:
user_token = get_user_token_by_client_id(client_id)
if user_token is None:
e = InvalidUsage("Permission denied.", status_code=403)
return handle_invalid_usage(e)
else:
return_json = {"user_token": user_token}
return jsonify(return_json) | a8de77d1f9f2c64fca6927c2d5fd29fa4133b31b | 3,652,157 |
def listThingTypes():
"""
Return a list of C{unicode} strings each of which gives the name of a type
which can be created with the create command.
"""
return sorted([type.type for type in getPlugins(IThingType, imaginary.plugins)]) | 31a98e58d2fd70cc7a1449e2e50946decbf98244 | 3,652,158 |
import json
def _parse_boolean(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: bool
"""
try:
boolean = json.loads(value)
if boolean is None or isinstance(boolean, bool):
return boolean
else:
raise DCOSException(
'Unable to parse {!r} as a boolean'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON boolean')
msg = 'Unable to parse {!r} as a boolean: {}'.format(value, error)
raise DCOSException(msg) | 60dcd4ed8663823fdf4df5c27e032c6693a11cc8 | 3,652,159 |
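# Quick illustration of _parse_boolean above (DCOSException and logger are defined elsewhere
# in the surrounding dcos module; these calls only exercise the happy path):
print(_parse_boolean("true"))   # True
print(_parse_boolean("false"))  # False
print(_parse_boolean("null"))   # None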
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
import time
def train(args, model, train_data_loader, dev_data_loader, accuracy, device):
"""
Train the current model
Keyword arguments:
args: arguments
model: model to be trained
train_data_loader: pytorch build-in data loader output for training examples
dev_data_loader: pytorch build-in data loader output for dev examples
accuracy: previous best accuracy
        device: cpu or gpu
"""
model.train()
optimizer = torch.optim.Adamax(model.parameters())
criterion = nn.CrossEntropyLoss()
print_loss_total = 0
epoch_loss_total = 0
start = time.time()
    #### modify the following code to complete the training function
for idx, batch in enumerate(train_data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
labels = batch['labels']
        # The original left this step as "#### Your code here"; a minimal standard training
        # step is sketched in (it assumes the model takes the text tensor and lengths as inputs):
        optimizer.zero_grad()
        output = model(question_text, question_len)
        loss = criterion(output, labels)
        loss.backward()
        clip_grad_norm_(model.parameters(), args.grad_clipping)
        optimizer.step()
print_loss_total += loss.data.numpy()
epoch_loss_total += loss.data.numpy()
if idx % args.checkpoint == 0 and idx > 0:
print_loss_avg = print_loss_total / args.checkpoint
print('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, time.time()- start))
print_loss_total = 0
curr_accuracy = evaluate(dev_data_loader, model, device)
if accuracy < curr_accuracy:
torch.save(model, args.save_model)
accuracy = curr_accuracy
return accuracy | fa5953bc4fc8554b1d0164eae96c1fd9ce949068 | 3,652,160 |
def get_category_embeddings(word_table, embeds):
"""Calculate embeddings from word labels for each category."""
category_words = read_categories_as_json()
word_ids = word_table.lookup(tf.constant(category_words))
glove_embeds = tf.nn.embedding_lookup(embeds, word_ids)
# Calculate category embedding by summing word vectors in each category
# tf.reduce_sum is used as the category embedding will be normalized later
category_embeds = tf.reduce_sum(glove_embeds, axis=1)
expand_category_embeds = tf.expand_dims(category_embeds, axis=1)
return expand_category_embeds | 23fbb867ec1390c49b0a217968d44f2abe189e57 | 3,652,161 |
import re
import requests
def find_download_links(soup, title, language):
"""Examine all download links per law document and create respective filepaths."""
vbfile = soup.find("div", "vbFile")
fulltext = soup.find("div", "fulltext")
# check if file attachment elements exist
if vbfile is not None:
attach = vbfile.select("ul li a")
metadata_list = [] # collect metadata for link and download_path
# some laws have multiple doc links, so we want to alter the saved doc's filename to prevent overwriting
multiple = len(attach) > 1
if multiple:
title += "__"
i = 1
# loop through every available file attachment
for a in attach:
# ignore "Xem nhanh"/Quick View links as they're invalid
if "iFrame" in a["href"]:
continue
# all other links are javascript
fpath = re.findall(r"([^']*)" , a["href"])[6]
url = BASE_URL + fpath
doc = requests.get(url)
        ext = re.split(r"\.", fpath)[-1]
# some laws have multiple doc links, so we alter the saved doc's filename to prevent overwriting
if multiple:
title = title[:-1] + str(i)
i += 1
fname = create_filename(title, language, ext)
with open(fname, "wb") as f:
for chunk in doc.iter_content(1024 * 1024):
f.write(chunk)
print("downloaded", ext, "for", title)
metadata_list.append({"link": url, "download_path": fname, "language": language}) # alternative for "download_path": [fname.index("data"):]
return metadata_list
# if file attachment elements don't exist, scrape the text off the page and save as txt
elif fulltext is not None:
doc = fulltext.get_text()
fname = create_filename(title, language, "txt")
with open(fname, "w", encoding = "utf-8") as f:
f.write(doc)
print("downloaded txt for", title)
return [{"download_path": fname, "language": language}] # alternative for "download_path": [fname.index("data"):]
# if neither exists, don't save law document
else:
return None | 22c017c20ab2a3cfa9cfeb1e43ec2a24e9ff0cc7 | 3,652,162 |
import random
from datetime import date, timedelta
def fake_feature_date(days=365):
"""Generate fake feature_date."""
start_date = date.today()
random_number_of_days = random.randrange(days)
_date = start_date + timedelta(days=random_number_of_days)
return _date.strftime("%Y-%m-%d") | f9706d353eef0f531ab74b585cf966349faf4003 | 3,652,163 |
import tensorflow as tf
def export_graph(checkpoint_path, output_nodes):
"""
Export a graph stored in a checkpoint as a *.pb file.
:param checkpoint_path: The checkpoint path which should be frozen.
:param output_nodes: The output nodes you care about as a list of strings (their names).
:return:
"""
if not tf.gfile.Exists(checkpoint_path):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % checkpoint_path)
if not output_nodes:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(checkpoint_path)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
output_graph = checkpoint_path + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_nodes # The output node names are used to select the useful nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def | b83b832c0b0bea9afc0e84843061bd8091e63fc8 | 3,652,165 |
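# Hedged usage sketch for export_graph above (TF 1.x graph-mode API). The checkpoint directory
# and the "logits" output node name are hypothetical placeholders; the call writes
# frozen_model.pb next to the checkpoint files.
checkpoint_dir = "/tmp/my_model_ckpt"   # hypothetical path
output_node_names = ["logits"]          # hypothetical output node
# export_graph(checkpoint_dir, output_node_names)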
def GetTrackingBranch(git_repo, branch=None, for_checkout=True, fallback=True,
manifest=None, for_push=False):
"""Gets the appropriate push branch for the specified directory.
This function works on both repo projects and regular git checkouts.
Assumptions:
1. We assume the manifest defined upstream is desirable.
2. No manifest? Assume tracking if configured is accurate.
3. If none of the above apply, you get 'origin', 'master' or None,
depending on fallback.
Args:
git_repo: Git repository to operate upon.
branch: Find the tracking branch for this branch. Defaults to the
current branch for |git_repo|.
for_checkout: Whether to return localized refspecs, or the remotes
view of it.
fallback: If true and no remote/branch could be discerned, return
'origin', 'master'. If False, you get None.
Note that depending on the remote, the remote may differ
if for_push is True or set to False.
for_push: Controls whether the remote and refspec returned is explicitly
for pushing.
manifest: A Manifest instance if one is available, else a
ManifestCheckout is created and used.
Returns:
A RemoteRef, or None.
"""
result = GetTrackingBranchViaManifest(git_repo, for_checkout=for_checkout,
manifest=manifest, for_push=for_push)
if result is not None:
return result
if branch is None:
branch = GetCurrentBranch(git_repo)
if branch:
result = GetTrackingBranchViaGitConfig(git_repo, branch,
for_checkout=for_checkout)
if result is not None:
if (result.ref.startswith('refs/heads/') or
result.ref.startswith('refs/remotes/')):
return result
if not fallback:
return None
if for_checkout:
return RemoteRef('origin', 'refs/remotes/origin/master')
return RemoteRef('origin', 'master') | 0bb9bb99e03cebc800a0f60f89a8de5178fdffdc | 3,652,166 |
import tensorflow as tf
def inception_crop(image, **kw):
"""Perform an "inception crop", without resize."""
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image), tf.zeros([0, 0, 4], tf.float32),
use_image_if_no_bounding_boxes=True, **kw)
crop = tf.slice(image, begin, size)
    # Unfortunately, the above operation loses the depth dimension, so we need
    # to restore it manually.
crop.set_shape([None, None, image.shape[-1]])
return crop | c5d8dd420055ad82e64bc61204fb6218c7621489 | 3,652,167 |
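# Minimal check of inception_crop above (illustrative): crop a random "image" tensor.
# area_range is a standard kwarg of tf.image.sample_distorted_bounding_box, forwarded via **kw.
image = tf.random.uniform([224, 224, 3])
crop = inception_crop(image, area_range=(0.33, 1.0))
print(crop.shape)  # a smaller [height, width, 3] crop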
import json
import logging
def _clear_port_access_clients_limit_v1(port_name, **kwargs):
"""
Perform GET and PUT calls to clear a port's limit of maximum allowed number of authorized clients.
:param port_name: Alphanumeric name of Port
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: True if successful, False otherwise
"""
port_name_percents = common_ops._replace_special_characters(port_name)
port_data = port.get_port(port_name_percents, depth=0, selector="configuration", **kwargs)
port_data.pop('port_access_clients_limit', None)
port_data.pop('name', None)
port_data.pop('origin', None)
port_data.pop('vrf', None)
target_url = kwargs["url"] + "system/ports/%s" % port_name_percents
put_data = json.dumps(port_data, sort_keys=True, indent=4)
response = kwargs["s"].put(target_url, data=put_data, verify=False)
if not common_ops._response_ok(response, "PUT"):
logging.warning("FAIL: Removing maximum allowable clients limit on Port '%s' failed with status code %d: %s"
% (port_name, response.status_code, response.text))
return False
else:
logging.info("SUCCESS: Removing maximum allowable clients limit on Port '%s' succeeded"
% port_name)
return True | 46827c7a7ac70dba971e6fc04e9d101a8ca9e8b6 | 3,652,168 |
from typing import cast
def cvGetHistValue_1D(hist, i1):
"""Returns pointer to histogram bin"""
return cast(cvPtr1D(hist.bins, i1), c_float_p) | 6b1a453696d8d1fbeda54900c1319e56a9133118 | 3,652,169 |
import re
def contains_order_by(query):
"""Returns true of the query contains an 'order by' clause"""
return re.search( r'order\s+by\b', query, re.M|re.I) is not None | 4f4eebadfd5dc4cb1121378db4ef5f68d27bf787 | 3,652,170 |
def detach(l):
"""\
Set a layer as detached, excluding it from gradient computation.
:param l: layer or list of layers to detach
:return: detached layer(s)
"""
# core module has multiple overloads for this:
# 1. detach(l) where l is a Layer and the return value is a Layer
# 2. detach(l) where l is a [Layer] and the return value is a [Layer]
return _eddl.detach(l) | d4ea7c1496bbcc918ad55c32c852332d4f892e31 | 3,652,171 |
import re
def __shorten_floats(source):
""" Use short float notation whenever possible
:param source: The source GLSL string
:return: The GLSL string with short float notation applied
"""
# Strip redundant leading digits
source = re.sub(re.compile(r'(?<=[^\d.])0(?=\.)'), '', source)
# Strip redundant trailing digits
return re.sub(re.compile(r'(?<=\d\.)0(?=\D)'), '', source) | 538645cde50e6c9a4ed3960cf6bbd177c5583381 | 3,652,172 |
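# Illustration of __shorten_floats above: redundant zeros around the decimal point
# in GLSL source are dropped.
print(__shorten_floats("vec2(0.5, 1.0);"))  # -> "vec2(.5, 1.);"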
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
:param img_rows: number of row in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (5, 5), (1, 1), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d(nb_filters, (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dropout(0.25),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
if logits:
return model, logits_tensor
else:
return model | 488b307f394f9184d094aeb0a71d75442534d4c7 | 3,652,173 |
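# Illustrative usage of cnn_model above (assumes the legacy Keras API and the conv_2d helper
# used by this module are available): build the MNIST-sized model and inspect it.
model = cnn_model(img_rows=28, img_cols=28, channels=1, nb_filters=64, nb_classes=10)
model.summary()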
def _mvnormal_halton(sample_shape,
mean,
randomized,
seed=None,
covariance_matrix=None,
scale_matrix=None,
validate_args=False,
dtype=None,
**kwargs):
"""Returns normal draws using Halton low-discrepancy sequences."""
random_type = (RandomType.HALTON_RANDOMIZED if randomized
else RandomType.HALTON)
return _mvnormal_quasi(sample_shape,
mean,
random_type,
seed=seed,
covariance_matrix=covariance_matrix,
scale_matrix=scale_matrix,
validate_args=validate_args,
dtype=dtype,
**kwargs) | f3795f4c40e809b461c60b43f118a57b1bb18ba3 | 3,652,174 |
def get_meals(bouts_sec: np.ndarray, max_gap_sec: float = 60.0, min_overlap: float = 0.25):
"""
Computes a sequence of meal intervals from a sequence of chewing-bout intervals.
:param bouts_sec: The sequence of chewing-bout intervals (see ``get_bouts`` output)
:param max_gap_sec: Maximum gap-duration that is merged between consecutive chewing-bouts
:param min_overlap: Minimum allowed overlap of chewing-bout duration with meal duration
:return: The 2-column (start & stop, in seconds) matrix of meals
"""
assert is_numpy_matrix(bouts_sec, cols=2)
assert isinstance(max_gap_sec, float)
assert isinstance(min_overlap, float)
meals_sec, orig_durations_sec = merge_gaps(bouts_sec, max_gap_sec, True)
overlap = orig_durations_sec / (meals_sec[:, 1] - meals_sec[:, 0])
return meals_sec[overlap >= min_overlap, :] | aa4d66d71614825a71daddbb169994e8d3c4aac7 | 3,652,175 |
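# Illustrative example for get_meals above (assumes the module's merge_gaps and is_numpy_matrix
# helpers are available): two chewing bouts separated by a 30 s gap merge into one meal interval.
import numpy as np

bouts = np.array([[0.0, 60.0], [90.0, 150.0]])
print(get_meals(bouts, max_gap_sec=60.0, min_overlap=0.25))  # roughly [[0., 150.]]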
def setPerformanceLevel(source, level):
"""Sets a given performance level for the GPU Core and Memory.
Args:
source: string containing word "core" or "mem"
        level: an integer between 0-7 for core and 0-3 for memory
    Returns:
        True - if action is successful.
False - not possible to apply configuration.
"""
if source == "core":
assert level in list(range(
0, 8)), "Core Performance Level betwen 0 and 7."
result = runDVFSscript("-P " + str(level))
if "ERROR" in result:
return False
elif source == "mem":
assert level in list(range(
0, 4)), "Core Performance Level betwen 0 and 3."
result = runDVFSscript("-p " + str(level))
if "ERROR" in result:
return False
else:
print("Not valid source used - core or mem")
return False
return True | ef13376acf51de0acb3491efcca731cdc3e569b3 | 3,652,176 |
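# Hedged usage sketch for setPerformanceLevel above: the call shells out to the module's
# DVFS script (runDVFSscript), so it only works on a machine with that tooling installed.
# ok = setPerformanceLevel("core", 7)   # pin the GPU core to its highest performance level
# ok = setPerformanceLevel("mem", 3)    # pin the GPU memory to its highest performance level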
from typing import OrderedDict
def elem_props_template_init(templates, template_type):
"""
Init a writing template of given type, for *one* element's properties.
"""
ret = OrderedDict()
tmpl = templates.get(template_type)
if tmpl is not None:
written = tmpl.written[0]
props = tmpl.properties
ret = OrderedDict((name, [val, ptype, anim, written]) for name, (val, ptype, anim) in props.items())
return ret | c31d5ca8224763701f44471f23f00454c4365240 | 3,652,177 |
def to_density(x, bins=5, bounds=None):
""""Turn into density based nb of bins"""
p_x = np.histogram(x, bins=bins, density=True, range=bounds)[0]
p_x = p_x / np.sum(p_x)
return p_x | ce4af24ef57ca466a0f5d4b96ffff13ee45ddddb | 3,652,178 |
import datetime
def pro_bar(ts_code='', api=None, start_date='', end_date='', freq='D', asset='E',
exchange='',
adj = None,
ma = [],
factors = None,
adjfactor = False,
contract_type = '',
retry_count = 3):
"""
BAR数据
Parameters:
------------
ts_code:证券代码,支持股票,ETF/LOF,期货/期权,港股,数字货币
start_date:开始日期 YYYYMMDD
end_date:结束日期 YYYYMMDD
freq:支持1/5/15/30/60分钟,周/月/季/年
asset:证券类型 E:股票和交易所基金,I:沪深指数,C:数字货币,FT:期货 FD:基金/O期权/H港股/中概美国/中证指数/国际指数
exchange:市场代码,用户数字货币行情
adj:复权类型,None不复权,qfq:前复权,hfq:后复权
ma:均线,支持自定义均线频度,如:ma5/ma10/ma20/ma60/maN
factors因子数据,目前支持以下两种:
vr:量比,默认不返回,返回需指定:factor=['vr']
tor:换手率,默认不返回,返回需指定:factor=['tor']
以上两种都需要:factor=['vr', 'tor']
retry_count:网络重试次数
Return
----------
DataFrame
code:代码
open:开盘close/high/low/vol成交量/amount成交额/maN均价/vr量比/tor换手率
期货(asset='X')
code/open/close/high/low/avg_price:均价 position:持仓量 vol:成交总量
"""
today= datetime.datetime.today().date()
today = str(today)[0:10]
start_date = '' if start_date is None else start_date
end_date = today if end_date == '' or end_date is None else end_date
ts_code = ts_code.strip().upper() if asset != 'C' else ts_code.strip().lower()
start_date = start_date.replace('-', '')
end_date = end_date.replace('-', '')
if len(freq.strip())>=3:
freq = freq.strip().lower()
else:
freq = freq.strip().upper() if asset != 'C' else freq.strip().lower()
asset = asset.strip().upper()
api = api if api is not None else pro_api()
for _ in range(retry_count):
try:
if asset == 'E':
if freq == 'D':
data = api.daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
if factors is not None and len(factors) >0 :
ds = api.daily_basic(ts_code=ts_code, start_date=start_date, end_date=end_date)[['trade_date', 'turnover_rate', 'volume_ratio']]
ds = ds.set_index('trade_date')
data = data.set_index('trade_date')
data = data.merge(ds, left_index=True, right_index=True)
data = data.reset_index()
if ('tor' in factors) and ('vr' not in factors):
data = data.drop('volume_ratio', axis=1)
if ('vr' in factors) and ('tor' not in factors):
data = data.drop('turnover_rate', axis=1)
if freq == 'W':
data = api.weekly(ts_code=ts_code, start_date=start_date, end_date=end_date)
if freq == 'M':
data = api.monthly(ts_code=ts_code, start_date=start_date, end_date=end_date)
if 'min' in freq:
data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
data['trade_date'] = data['trade_time'].map(lambda x: x.replace('-', '')[0:8])
data['pre_close'] = data['close'].shift(-1)
if adj is not None:
fcts = api.adj_factor(ts_code=ts_code, start_date=start_date, end_date=end_date)[['trade_date', 'adj_factor']]
data = data.set_index('trade_date', drop=False).merge(fcts.set_index('trade_date'), left_index=True, right_index=True, how='left')
if 'min' in freq:
data = data.sort_values('trade_time', ascending=False)
data['adj_factor'] = data['adj_factor'].fillna(method='bfill')
for col in PRICE_COLS:
if adj == 'hfq':
data[col] = data[col] * data['adj_factor']
if adj == 'qfq':
data[col] = data[col] * data['adj_factor'] / float(fcts['adj_factor'][0])
data[col] = data[col].map(FORMAT)
for col in PRICE_COLS:
data[col] = data[col].astype(float)
if adjfactor is False:
data = data.drop('adj_factor', axis=1)
if 'min' not in freq:
data['change'] = data['close'] - data['pre_close']
data['pct_chg'] = data['change'] / data['pre_close'] * 100
data['pct_chg'] = data['pct_chg'].map(lambda x: FORMAT(x)).astype(float)
else:
data = data.drop(['trade_date', 'pre_close'], axis=1)
elif asset == 'I':
if freq == 'D':
data = api.index_daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
if freq == 'W':
data = api.index_weekly(ts_code=ts_code, start_date=start_date, end_date=end_date)
if freq == 'M':
data = api.index_monthly(ts_code=ts_code, start_date=start_date, end_date=end_date)
if 'min' in freq:
data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
elif asset == 'FT':
if freq == 'D':
data = api.fut_daily(ts_code=ts_code, start_date=start_date, end_date=end_date, exchange=exchange)
if 'min' in freq:
data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
elif asset == 'O':
if freq == 'D':
data = api.opt_daily(ts_code=ts_code, start_date=start_date, end_date=end_date, exchange=exchange)
if 'min' in freq:
data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
elif asset == 'FD':
if freq == 'D':
data = api.fund_daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
if 'min' in freq:
data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
if asset == 'C':
if freq == 'd':
freq = 'daily'
elif freq == 'w':
freq = 'week'
data = api.coinbar(exchange=exchange, symbol=ts_code, freq=freq, start_dae=start_date, end_date=end_date,
contract_type=contract_type)
if ma is not None and len(ma) > 0:
for a in ma:
if isinstance(a, int):
data['ma%s'%a] = MA(data['close'], a).map(FORMAT).shift(-(a-1))
data['ma%s'%a] = data['ma%s'%a].astype(float)
data['ma_v_%s'%a] = MA(data['vol'], a).map(FORMAT).shift(-(a-1))
data['ma_v_%s'%a] = data['ma_v_%s'%a].astype(float)
data = data.reset_index(drop=True)
except Exception as e:
return None
else:
return data
raise IOError('ERROR.') | 8df29ce9ac89acabfb5109954f2deee515d707ca | 3,652,179 |
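# Illustrative call of pro_bar above (requires a configured tushare pro API token;
# the ts_code and dates below are example values only): forward-adjusted daily bars
# with 5- and 20-day moving averages.
# df = pro_bar(ts_code='000001.SZ', adj='qfq', start_date='20200101',
#              end_date='20201231', ma=[5, 20])
# print(df.head())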
def ticket_qr_code(request, ticket_id):
""" Generates a qr code data url to validate a ticket with the id passed """
return segno.make(
validate_ticket_url(request, ticket_id),
micro=False
).svg_data_uri(scale=2) | 249b25d5d96651175e881284d31938fe56fe06dc | 3,652,181 |
def _iou(box_a, box_b):
"""
:param box_a: [c, A, 4]
:param box_b: [c, B, 4]
    :return: [c, A, B] pairwise IoU between every box in box_a and every box in box_b
    """
    # Convert (cx, cy, w, h) boxes to top-left / bottom-right corner coordinates
boxes1 = tf.concat([box_a[..., :2] - box_a[..., 2:] * 0.5,
box_a[..., :2] + box_a[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([box_b[..., :2] - box_b[..., 2:] * 0.5,
box_b[..., :2] + box_b[..., 2:] * 0.5], axis=-1)
c = tf.shape(boxes1)[0]
A = tf.shape(boxes1)[1]
B = tf.shape(boxes2)[1]
box_a = tf.reshape(boxes1, (c, A, 1, 4))
box_b = tf.reshape(boxes2, (c, 1, B, 4))
expand_box_a = tf.tile(box_a, [1, 1, B, 1])
expand_box_b = tf.tile(box_b, [1, A, 1, 1])
    # Areas of the two sets of boxes
boxes1_area = (expand_box_a[..., 2] - expand_box_a[..., 0]) * (
expand_box_a[..., 3] - expand_box_a[..., 1])
boxes2_area = (expand_box_b[..., 2] - expand_box_b[..., 0]) * (
expand_box_b[..., 3] - expand_box_b[..., 1])
    # Top-left and bottom-right corners of the intersection rectangles
left_up = tf.maximum(expand_box_a[:, :, :, :2], expand_box_b[:, :, :, :2])
right_down = tf.minimum(expand_box_a[:, :, :, 2:], expand_box_b[:, :, :, 2:])
    # Intersection area (inter_area), then IoU
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = inter_area / (union_area + 1e-9)
return iou | f385b2a4ddfd6dfcae7b6cc3067e229c3dff768b | 3,652,182 |
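# Small sanity check for _iou above (boxes are given as [cx, cy, w, h]):
# an identical box pair gives IoU ~1, a disjoint pair gives 0.
import tensorflow as tf

box_a = tf.constant([[[5.0, 5.0, 10.0, 10.0]]])                          # [c=1, A=1, 4]
box_b = tf.constant([[[5.0, 5.0, 10.0, 10.0], [20.0, 20.0, 4.0, 4.0]]])  # [c=1, B=2, 4]
print(_iou(box_a, box_b))  # approx [[[1.0, 0.0]]]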
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed module-level default (added so the signature below resolves)
def subsequent_mask(size, device=device):
"""
    Mask out subsequent positions: upper-diagonal elements are masked.
    :param size: length of the (square) mask
    :return: boolean mask that is False for subsequent (future) positions
"""
# upper diagonal elements are 1s, lower diagonal and the main diagonal are zeroed
triu = torch.triu(torch.ones(size, size, dtype=torch.int8, device=device), diagonal=1)
# invert it
mask = triu == 0
mask = mask.unsqueeze(0)
return mask | 5c642e8f73ee33307b54193db2a15ec80518e673 | 3,652,183 |
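# Quick illustration of subsequent_mask above: each position may attend only to itself
# and earlier positions (the module-level `device` default is overridden here).
print(subsequent_mask(3, device=torch.device("cpu")))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])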
from typing import Optional
import google
def read_gcs_file_if_exists(gcs_client: storage.Client,
gsurl: str) -> Optional[str]:
"""return string of gcs object contents or None if the object does not exist
"""
try:
return read_gcs_file(gcs_client, gsurl)
except google.cloud.exceptions.NotFound:
return None | 1354d319dd20193fa097e4f2c6e7cce8edbda98b | 3,652,184 |
from typing import Union
from typing import List
from typing import Tuple
from typing import Dict
def collapse_multigraph_to_nx(
graph: Union[gr.MultiDiGraph, gr.OrderedMultiDiGraph]) -> nx.DiGraph:
""" Collapses a directed multigraph into a networkx directed graph.
In the output directed graph, each node is a number, which contains
itself as node_data['node'], while each edge contains a list of the
data from the original edges as its attribute (edge_data[0...N]).
:param graph: Directed multigraph object to be collapsed.
:return: Collapsed directed graph object.
"""
# Create the digraph nodes.
digraph_nodes: List[Tuple[int, Dict[str,
nd.Node]]] = ([None] *
graph.number_of_nodes())
node_id = {}
for i, node in enumerate(graph.nodes()):
digraph_nodes[i] = (i, {'node': node})
node_id[node] = i
# Create the digraph edges.
digraph_edges = {}
for edge in graph.edges():
src = node_id[edge.src]
dest = node_id[edge.dst]
if (src, dest) in digraph_edges:
edge_num = len(digraph_edges[src, dest])
digraph_edges[src, dest].update({edge_num: edge.data})
else:
digraph_edges[src, dest] = {0: edge.data}
# Create the digraph
result = nx.DiGraph()
result.add_nodes_from(digraph_nodes)
result.add_edges_from(digraph_edges)
return result | 1239393366071371116c5ebbb28209fa753b3db8 | 3,652,185 |
def get_encoder_type(encoder_name):
""" gets the class of the encoer of the given name """
if encoder_name == 'Dense':
return DenseEncoder
elif encoder_name == 'CNN':
return CNNEncoder
else:
raise ValueError(encoder_name) | 31fbce2fb26ebdaf2d3701d36d0d977039b03e42 | 3,652,186 |
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.random.truncated_normal`: if run twice with
the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = tensor_util.shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_truncated_normal(
shape, seed, dtype)
result = math_ops.add(rnd * stddev, mean, name=name)
tensor_util.maybe_set_static_shape(result, shape)
return result | b0b967cc6d8e489cb4f914f7306ef5d5376977b0 | 3,652,187 |
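# Illustration of the stateless contract described above, via the public
# tf.random.stateless_truncated_normal API (the exported form of the function above):
# the same seed always yields the same draws.
import tensorflow as tf

a = tf.random.stateless_truncated_normal([2, 3], seed=[1, 2])
b = tf.random.stateless_truncated_normal([2, 3], seed=[1, 2])
print(bool(tf.reduce_all(a == b)))  # True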
def mean(l):
"""
Returns the mean value of the given list
"""
sum = 0
for x in l:
sum = sum + x
return sum / float(len(l)) | 74926c9aaafd2362ce8821d7040afcba1f569400 | 3,652,188 |
from bettermoments.quadratic import quadratic
def collapse_quadratic(velax, data, rms):
"""
Collapse the cube using the quadratic method presented in `Teague &
Foreman-Mackey (2018)`_. Will return the line center, ``v0``, and the
uncertainty on this, ``dv0``, as well as the line peak, ``Fnu``, and the
uncertainty on that, ``dFnu``. This provides the sub-channel precision of
:func:`bettermoments.collapse_cube.collapse_first` with the robustness to
noise from :func:`bettermoments.collapse_cube.collapse_ninth`.
.. _Teague & Foreman-Mackey (2018): https://iopscience.iop.org/article/10.3847/2515-5172/aae265
Args:
velax (ndarray): Velocity axis of the cube.
data (ndarray): Flux density or brightness temperature array. Assumes
that the zeroth axis is the velocity axis.
rms (float): Noise per pixel in same units as ``data``.
Returns:
``v0`` (`ndarray`), ``dv0`` (`ndarray`), ``Fnu`` (`ndarray`), ``dFnu`` (`ndarray`):
``v0``, the line center in the same units as ``velax`` with ``dv0``
as the uncertainty on ``v0`` in the same units as ``velax``.
``Fnu`` is the line peak in the same units as the
``data`` with associated uncertainties, ``dFnu``.
"""
chan = np.diff(velax).mean()
return np.squeeze(quadratic(data, x0=velax[0], dx=chan, uncertainty=rms)) | b19b6ee4b75c246e505fc5a4a47eac736c40599a | 3,652,190 |
def get_module_version(module_name: str) -> str:
"""Check module version. Raise exception when not found."""
version = None
if module_name == "onnxrt":
module_name = "onnxruntime"
command = [
"python",
"-c",
f"import {module_name} as module; print(module.__version__)",
]
proc = Proc()
proc.run(args=command)
if proc.is_ok:
for line in proc.output:
version = line.strip()
proc.remove_logs()
if version is None:
raise ClientErrorException(f"Could not found version of {module_name} module.")
return version | 32c608491e012bd0b77d0e52aa206332877d889b | 3,652,191 |
import requests
def _post(url, data):
"""RESTful API post (insert to database)
Parameters
----------
url: str
Address for the conftrak server
data: dict
Entries to be inserted to database
"""
r = requests.post(url,
data=ujson.dumps(data))
r.raise_for_status()
return r.json() | 6cbf8700360e6eff868eef91125a692dfef5af47 | 3,652,192 |
import inspect
import functools
def curry(arity_or_fn=None, ignore_kwargs=False, evaluator=None, *args, **kw):
"""
Creates a function that accepts one or more arguments of a function and
either invokes func returning its result if at least arity number of
arguments have been provided, or returns a function that accepts the
remaining function arguments until the function arity is satisfied.
This function is overloaded: you can pass a function or coroutine function
as first argument or an `int` indicating the explicit function arity.
Function arity can be inferred via function signature or explicitly
passed via `arity_or_fn` param.
    You can optionally ignore keyword-based arguments as well by passing the
    `ignore_kwargs` param with a `True` value.
    This function can be used as a decorator.
Arguments:
arity_or_fn (int|function|coroutinefunction): function arity to curry
or function to curry.
ignore_kwargs (bool): ignore keyword arguments as arity to satisfy
during curry.
evaluator (function): use a custom arity evaluator function.
*args (mixed): mixed variadic arguments for partial function
application.
*kwargs (mixed): keyword variadic arguments for partial function
application.
Raises:
TypeError: if function is not a function or a coroutine function.
Returns:
function or coroutinefunction: function will be returned until all the
function arity is satisfied, where a coroutine function will be
returned instead.
Usage::
# Function signature inferred function arity
@paco.curry
async def task(x, y, z=0):
return x * y + z
await task(4)(4)(z=8)
# => 24
# User defined function arity
@paco.curry(4)
async def task(x, y, *args, **kw):
return x * y + args[0] * args[1]
await task(4)(4)(8)(8)
# => 80
# Ignore keyword arguments from arity
@paco.curry(ignore_kwargs=True)
async def task(x, y, z=0):
return x * y
await task(4)(4)
# => 16
"""
def isvalidarg(x):
return all([
x.kind != x.VAR_KEYWORD,
x.kind != x.VAR_POSITIONAL,
any([
not ignore_kwargs,
ignore_kwargs and x.default == x.empty
])
])
def params(fn):
return inspect.signature(fn).parameters.values()
def infer_arity(fn):
return len([x for x in params(fn) if isvalidarg(x)])
def merge_args(acc, args, kw):
_args, _kw = acc
_args = _args + args
_kw = _kw or {}
_kw.update(kw)
return _args, _kw
def currier(arity, acc, fn, *args, **kw):
"""
            Either continues currying the arguments
            or executes the function once the desired arguments have been collected.
            If the curried function is variadic, then execution without arguments
            will finish currying and trigger the function.
"""
# Merge call arguments with accumulated ones
_args, _kw = merge_args(acc, args, kw)
# Get current function call accumulated arity
current_arity = len(args)
# Count keyword params as arity to satisfy, if required
if not ignore_kwargs:
current_arity += len(kw)
# Decrease function arity to satisfy
arity -= current_arity
# Use user-defined custom arity evaluator strategy, if present
currify = evaluator and evaluator(acc, fn)
# If arity is not satisfied, return recursive partial function
if currify is not False and arity > 0:
return functools.partial(currier, arity, (_args, _kw), fn)
        # If arity is satisfied, instantiate coroutine and return it
return fn(*_args, **_kw)
def wrapper(fn, *args, **kw):
if not iscallable(fn):
            raise TypeError('paco: first argument must be a coroutine function, '
'a function or a method.')
# Infer function arity, if required
arity = (arity_or_fn if isinstance(arity_or_fn, int)
else infer_arity(fn))
# Wraps function as coroutine function, if needed.
fn = wraps(fn) if isfunc(fn) else fn
# Otherwise return recursive currier function
return currier(arity, (args, kw), fn, *args, **kw) if arity > 0 else fn
# Return currier function or decorator wrapper
return (wrapper(arity_or_fn, *args, **kw)
if iscallable(arity_or_fn)
else wrapper) | 225f9295894538046f3dc5390964416c2ef6a7d1 | 3,652,193 |
def fin_forecast(ratio1, ratio2, sp_df):
"""used to forecast 3 years of financial forecast/projection
"""
print("print test line 6")
forecast = MCSimulation(
portfolio_data = sp_df,
weights = [ratio1, ratio2],
num_simulation = 500,
num_trading_days = 252*3
)
print("test line 3")
print(forecast.portfolio_data.head())
simulation = forecast.portfolio_data
#return ratio1, ratio2, sp_df
return simulation | 7d081d58bf9ec779fe6a68818e0f245c9c634db8 | 3,652,195 |
def implied_volatility(price, S, K, t, r, q, flag):
"""Calculate the Black-Scholes-Merton implied volatility.
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param sigma: annualized standard deviation, or volatility
:type sigma: float
:param t: time to expiration in years
:type t: float
:param r: risk-free interest rate
:type r: float
:param q: annualized continuous dividend rate
:type q: float
:param flag: 'c' or 'p' for call or put.
:type flag: str
>>> S = 100
>>> K = 100
>>> sigma = .2
>>> r = .01
>>> flag = 'c'
>>> t = .5
>>> q = .02
>>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)
>>> implied_volatility(price, S, K, t, r, q, flag)
0.20000000000000018
>>> flac = 'p'
>>> sigma = 0.3
>>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)
>>> price
8.138101080183894
>>> implied_volatility(price, S, K, t, r, q, flag)
0.30000000000000027
"""
f = lambda sigma: price - black_scholes_merton(flag, S, K, t, r, sigma, q)
return brentq(
f,
a=1e-12,
b=100,
xtol=1e-15,
rtol=1e-15,
maxiter=1000,
full_output=False
) | d3db13c24cf491df519d286e25da2e0a33448615 | 3,652,197 |
import re
def clean_hotel_maxpersons(string):
"""
"""
if string is not None:
r = int(re.findall('\d+', string)[0])
else:
r = 0
return r | d20d9db1da49eea1a4057e43b9f43f2960dbd27a | 3,652,198 |
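# Quick examples for clean_hotel_maxpersons above (the listing strings are made up):
print(clean_hotel_maxpersons("Max. persons: 4"))  # 4
print(clean_hotel_maxpersons(None))               # 0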