content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
def parse_cfg(filename):
"""
Inputs:
- cfg's file name, e.g. 'yolov3.cfg'
Returns:
- a list of NN blocks, each block is represented as a dictionary
"""
    with open(filename, 'r') as file:
        lines = file.read().split('\n')
    lines = [x.strip() for x in lines if len(x) > 0 and x[0] != '#']
blocks = []
block = {}
for line in lines:
if line[0] == '[':
if len(block) != 0:
blocks.append(block)
block = {}
block['type'] = line[1:-1].rstrip()
else:
            key, value = line.split('=', 1)
            block[key.strip()] = value.strip()
blocks.append(block)
return blocks
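# Usage sketch ('example.cfg' and its contents are hypothetical, for illustration only):
with open('example.cfg', 'w') as f:
    f.write('[net]\nbatch=64\n\n[convolutional]\nfilters=32\n')
assert parse_cfg('example.cfg') == [
    {'type': 'net', 'batch': '64'},
    {'type': 'convolutional', 'filters': '32'},
]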
|
f953dfa6304732572c058a01b42de83f1dbc9eb5
| 695,598 |
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
|
1f5706e3b3733527baedd4df240f0be88adb80a5
| 695,603 |
def from_none(exc):
"""Emulates raise ... from None (PEP 409) on older Python-s
"""
try:
exc.__cause__ = None
except AttributeError:
exc.__context__ = None
return exc
|
1cf96c58c7ef601a98a6891d2f969da8799f99b2
| 695,604 |
def error_503(_):
"""Maintenance."""
return 'PixyShip is down for maintenance', 503
|
3acecece1da3191d699fd58ebcd840494cf31b4c
| 695,605 |
def cleanup_time_string(t):
"""
    Convert from microseconds to seconds. Sub-second values are rounded to
    5 decimal places; values of one second or more are truncated to whole seconds.
"""
time_in_seconds = float(t) / 1e6
if time_in_seconds < 1:
time_in_seconds = round(time_in_seconds, 5)
else:
time_in_seconds = int(time_in_seconds)
timestring = str(time_in_seconds)
return timestring
|
dccbab566aa4112468cfa7ea0d059475ad76d65b
| 695,606 |
def anyfalse(bools):
"""Returns True iff any elements of iterable `bools` are False
>>> anyfalse([True, True])
False
>>> anyfalse([True, False, True])
True
>>> anyfalse(None)
False
"""
if bools is None: return False
for b in bools:
if not b:
return True
return False
|
1d846b588705349b8fb8f0fd8593c0c87809bd5d
| 695,610 |
def hexify(env, req):
"""
Convert integer parameter to hex
"""
    # hex() already returns a str, so no extra conversion is needed
    return hex(int(req.match.group(1)))
|
ea70d5df380e08cad60247345aa6b22c5eb6bb66
| 695,614 |
def corners_to_center_scale(p0, p1):
"""Convert bounding boxes from "corners" form to "center+scale" form"""
yx = 0.5 * (p0 + p1)
hw = p1 - p0
return yx, hw
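# Usage sketch (assumes NumPy arrays of (y, x) corner coordinates; values are illustrative):
import numpy as np
yx, hw = corners_to_center_scale(np.array([0.0, 0.0]), np.array([4.0, 2.0]))
assert (yx == np.array([2.0, 1.0])).all()  # box center
assert (hw == np.array([4.0, 2.0])).all()  # height/width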
|
ea313fbe774d29c4b05a886323425730a1b409af
| 695,616 |
def wait(method):
"""
Decorator to wait for previous timers and to start a new one on exit
    :param method: method to wrap with a timer.wait call
:return: wrapped method
"""
def _method(self, *args, **kwargs):
# wait if timer is running but only up until the time-limit
self.timer.wait(self.timer.seconds)
self.timer.clear()
outcome = method(self, *args, **kwargs)
self.timer.start()
return outcome
return _method
|
c02bede214a3891ea50bc317db123dd3d32e8044
| 695,617 |
def condition_domain_reduction(csp, var) :
"""Returns True if var should be enqueued under the all-reduced-domains
condition, otherwise False"""
return True
|
968c44431058d9c1007bb11f4f247c70e6865a71
| 695,618 |
def log_url(ip, port, project, spider, job):
"""
get log url
:param ip: host
:param port: port
:param project: project
:param spider: spider
:param job: job
:return: string
"""
url = 'http://{ip}:{port}/logs/{project}/{spider}/{job}.log'.format(
ip=ip, port=port, project=project, spider=spider, job=job
)
return url
|
2e61796b213698bc70b1ebf968d31fa678e8258e
| 695,620 |
def format_for_null(value):
"""If a Python value is None, we want it to convert to null in json."""
if value is None:
return value
else:
return "{}".format(value)
|
004ada8d8496705c2afd42b82feef3c7a6e38079
| 695,627 |
def getIIsum(data, U, B):
"""
Compute summed area as:
A=U Bi=U[0],B[1]
+----------+
| |
| |
+----------+
C=B[0],U[1] D=B
    \sum = I(D) + I(A) - I(Bi) - I(C)
"""
if (U == B):
return data[U]
else:
return (data[B] + data[U]) - (data[U[0], B[1]] + data[B[0], U[1]])
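# Worked sketch (assumes `data` is a NumPy integral image holding inclusive
# cumulative sums, indexed by (row, col) tuples U and B; values are illustrative):
import numpy as np
img = np.array([[1, 2], [3, 4]])
ii = img.cumsum(0).cumsum(1)  # integral image: [[1, 3], [4, 10]]
assert getIIsum(ii, (0, 0), (1, 1)) == 4  # sum over img[1:2, 1:2]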
|
25f1b937036323a17465382df6a076f3269775fd
| 695,629 |
def _addHeaderToRequest(request, header):
"""
Add a header tuple to a request header object.
@param request: The request to add the header tuple to.
@type request: L{twisted.web.http.Request}
@param header: The header tuple to add to the request.
@type header: A L{tuple} with two elements, the header name and header
value, both as L{bytes}.
@return: If the header being added was the C{Content-Length} header.
@rtype: L{bool}
"""
requestHeaders = request.requestHeaders
name, value = header
values = requestHeaders.getRawHeaders(name)
if values is not None:
values.append(value)
else:
requestHeaders.setRawHeaders(name, [value])
if name == b"content-length":
request.gotLength(int(value))
return True
return False
|
a42f99b922b5234671360b93d0a6326ca24cf6a2
| 695,631 |
import click
def _validate_month(ctx, param, val):
"""Helper function to validate a month coming from the CLI."""
if val < 1 or val > 12:
raise click.BadParameter('Month must be between 1 and 12')
return val
|
a38de3a9d62a38f90e12fad675a810eadf9fee22
| 695,632 |
def softmax_est_crossentropy_deriv(y_est, y):
"""
Compute the gradient of the multiclass softmax cross-entropy
with respect to its input variables, given only the output
of the softmax function
Parameters
----------
y_est: ndarray(N)
Output of the softmax function
y: ndarray(N)
Target values
Returns
-------
ndarray(N):
Derivative of multiclass softmax cross-entropy
"""
return y_est - y
|
f2519967529c3fe79322898391a347eaf306c804
| 695,633 |
def nvl(value, default=''):
"""Get specified value, or an empty string if value is empty
:param object value: value to be checked
:param object default: default value to be returned if value is *false*
:return: input value, or *default* if value is *false*
>>> from pyams_utils.unicode import nvl
>>> nvl(None)
''
>>> nvl('foo')
'foo'
>>> nvl(False, 'bar')
'bar'
"""
return value or default
|
5334bc6e5a142e217bc40600bd493bd0c39b037c
| 695,641 |
def njit_time_point_thresh(wf_in, threshold, tp_max, tp_out):
"""
Find the last timepoint before tp_max that wf_in crosses a threshold
wf_in: input waveform
threshold: threshold to search for
tp_max: time of a maximum of a waveform that the search starts at
tp_out: final time that waveform is less than threshold
"""
for i in range(tp_max, 0, -1):
if(wf_in[i]>threshold and wf_in[i-1]<threshold):
tp_out = i
return tp_out
tp_out = 0
return tp_out
|
cebeecb4bc176bb72015b28aedaf6b0ddca8e1b6
| 695,644 |
def _path_to_release(path):
"""Compatibility function, allows us to use release identifiers like "3.0" and "3.1"
in the public API, and map these internally into storage path segments."""
if path == "v3":
return "3.0"
elif path.startswith("v3."):
return path[1:]
else:
raise RuntimeError(f"Unexpected release path: {path!r}")
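# Usage sketch (illustrative values):
assert _path_to_release("v3") == "3.0"
assert _path_to_release("v3.1") == "3.1"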
|
84ea8a22e3a1d82df249161fd76b12c370f70742
| 695,654 |
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
|
21cd3ba65fded59c152183445e59f1aa99b4c72e
| 695,659 |
import warnings
def assert_warns(wtype, f, *args, **kwargs):
"""Check that a function call `f(*args, **kwargs)` raises a warning of
type wtype.
Returns the output of `f(*args, **kwargs)` unless there was no warning,
in which case an AssertionError is raised.
"""
# Check that f() raises a warning, but not an error.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
res = f(*args, **kwargs)
assert len(w) >= 1, "Expected warning was not raised."
assert issubclass(w[0].category, wtype), \
"Warning raised was the wrong type (got %s, expected %s)" % (
w[0].category, wtype)
return res
|
aa27979e2c9bba268a676ed070296e52add7b210
| 695,664 |
import hashlib
def get_fingerprint(contents: str) -> str:
"""
Generate a fingerprint for the contents of a virtual relation.
This fingerprint is used by the server for caching purposes.
:param contents: The full contents of a tsv file
:returns: md5 sum representing the file contents
"""
md5 = hashlib.md5()
md5.update(repr(contents).encode())
return md5.hexdigest()
|
65dd77ca873b8928af5af1f3ea036555e835c418
| 695,667 |
def crop_image(frame, bbox):
"""Return the cropped image from frame specified by bbox"""
x_start, x_end = int(bbox[0]), int(bbox[2])
y_start, y_end = int(bbox[1]), int(bbox[3])
crop_img = frame[y_start:y_end, x_start:x_end, :].copy()
return crop_img
|
702477cd6c98a6170d254b3837d2dfbd605d8ae3
| 695,669 |
import math
def GeometricMean(values):
"""Compute a rounded geometric mean from an array of values."""
if not values:
return None
# To avoid infinite value errors, make sure no value is less than 0.001.
new_values = []
for value in values:
if value > 0.001:
new_values.append(value)
else:
new_values.append(0.001)
# Compute the sum of the log of the values.
log_sum = sum(map(math.log, new_values))
# Raise e to that sum over the number of values.
mean = math.pow(math.e, (log_sum / len(new_values)))
# Return the rounded mean.
return int(round(mean))
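# Usage sketch (illustrative values):
assert GeometricMean([2, 8]) == 4  # geometric mean of 2 and 8 is sqrt(16) = 4, rounded
assert GeometricMean([]) is None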
|
bd1bb53943c8db09c18f1e4710c36000fa07671d
| 695,670 |
from typing import Tuple
import string
def return_c_values(cardinality: int) -> Tuple[list, list]:
"""Return categorical values for C+ and C-.
Create string values to be used for the categorical variable c.
We build two sets of values C+ and C-. All values from C+ end with
"A" and all values from C- end with "B". The cardinality input
determines len(c_pos) + len(c_neg).
Args:
cardinality (int): cardinality of c
Returns:
c_pos (list): categorical values from C+ sample
c_neg (list): categorical values from C- sample
"""
suffixes = [
"{}{}".format(i, j)
for i in string.ascii_lowercase
for j in string.ascii_lowercase]
c_pos = ["{}A".format(s) for s in suffixes][:int(cardinality / 2)]
c_neg = ["{}B".format(s) for s in suffixes][:int(cardinality / 2)]
return c_pos, c_neg
|
7b25599b2fb9efb1f053317d19f94f73b5889a36
| 695,671 |
import torch
def mat2euler(mat):
""" Convert rotation matrix to euler angles.
https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L283
Args:
mat: rotation matrix in zyx format -- size = [B, 3, 3]
Returns:
angle: rotation angle along 3 axis (in radians, it's not unique) -- size = [B, 3]
"""
cy_thresh = 1e-10
cy = torch.sqrt(mat[:, 2, 2]*mat[:, 2, 2] + mat[:, 1, 2]*mat[:, 1, 2])
if (cy > cy_thresh).any(): # cos(y) not close to zero, standard form
z = torch.atan2(-mat[:, 0, 1], mat[:, 0, 0]) # atan2(cos(y)*sin(z), cos(y)*cos(z))
y = torch.atan2(mat[:, 0, 2], cy) # atan2(sin(y), cy)
x = torch.atan2(-mat[:, 1, 2], mat[:, 2, 2]) # atan2(cos(y)*sin(x), cos(x)*cos(y))
else: # cos(y) (close to) zero, so x -> 0.0 (see above)
# so r21 -> sin(z), r22 -> cos(z) and
z = torch.atan2(mat[:, 1, 0], mat[:, 1, 1])
y = torch.atan2(mat[:, 0, 2], cy) # atan2(sin(y), cy)
x = torch.zeros_like(mat[:, 0, 0])
return torch.cat([x.unsqueeze(-1), y.unsqueeze(-1), z.unsqueeze(-1)], -1).view(-1, 3)
|
a940f61fd00ce28c01bce7704920a49cf8bab8eb
| 695,674 |
import json
def read_shapefile_data(path,file_name):
"""
    Reads in a json file containing the paths to shapefiles
    (on regional, province and municipality level)
    and the crs for encoding.
"""
    d = json.load(open(path + file_name))
    path_shapefiles = d["path_shapefiles"]
    regions = d["regions"]
    provinces = d["provinces"]
    municipalities = d["municipalities"]
    territories = d["area_territoriali"]
    crs = d["crs"]
return path_shapefiles,regions,provinces,territories,municipalities,crs
|
b209b594debabcd26f5cd8dd6a8f41fac0207560
| 695,676 |
def zstrip(chars):
"""Strip all data following the first zero in the string"""
if '\0' in chars:
return chars[:chars.index("\0")]
return chars
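# Usage sketch (illustrative values):
assert zstrip("abc\0def") == "abc"
assert zstrip("abc") == "abc"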
|
0c03f677008b6195723f3d62e55f108d4d265742
| 695,678 |
from typing import Tuple
def channel_error(input_shape: Tuple[int, int, int]) -> ValueError:
"""
Value Error Message for Channel Error
:param input_shape: Three Int Tuple
:return: Custom text Value Error
"""
return ValueError(
f"The input must have 3 channels; got `input_shape={str(input_shape)}`"
)
|
a74db84d57f95524fd95e7f2d7225308949369cd
| 695,679 |
import json
def _json_dumps(value):
"""
json.dumps parameters for dumping unicode into JS
"""
return json.dumps(value, separators=(",", ":"), ensure_ascii=False)
|
79a9d8b51df110ce19baa6a022d38a24a8492591
| 695,680 |
def _extended_gcd(a, b):
"""Returns (g, x, y) such that a*x + b*y = g = gcd(a, b)"""
x, x_old, y, y_old = 0, 1, 1, 0
while a != 0:
(q, a), b = divmod(b, a), a
y, y_old = y_old, y - q * y_old
x, x_old = x_old, x - q * x_old
return b, x, y
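# Usage sketch: gcd(6, 4) == 2, with Bezout coefficients x = 1, y = -1
g, x, y = _extended_gcd(6, 4)
assert (g, 6 * x + 4 * y) == (2, 2)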
|
e96a65990cc9e6165867ccfd7756dcf0ae2b33d8
| 695,681 |
def guess_type(text):
"""
Guess the type of a value encoded in a string.
"""
# int
try:
int(text)
return int
    except ValueError:
pass
# float
try:
float(text)
return float
except ValueError:
pass
# string
return str
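# Usage sketch (illustrative values):
assert guess_type("42") is int
assert guess_type("3.14") is float
assert guess_type("hello") is str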
|
bd2c7fa52ff4ee79f87d9018493ccb3f1394daa6
| 695,682 |
import math
def list_values_approx_equal(num_list, rel_tol):
"""
Check if all values in a list are within a relative tolerance of each other
:param num_list: List of numbers
:param rel_tol: The relative numerical tolerance
:return: Truth value
"""
    for i in range(len(num_list) - 1):
        for j in range(i + 1, len(num_list)):
if not math.isclose(num_list[i], num_list[j], rel_tol=rel_tol):
return False
return True
|
02007cbdbc5591cf33f64946f9d80bb8eb69b4ee
| 695,685 |
def sqlobj_from_dict(obj, values):
"""
Merge in items in the values dict into our object if it's one of our columns.
"""
for c in obj.__table__.columns:
if c.name in values:
setattr(obj, c.name, values[c.name])
# This return isn't necessary as the obj referenced is modified, but it makes it more
# complete and consistent
return obj
|
2aa65226f5bc5abb9870ab4a9e414dff691beaa7
| 695,688 |
def _get_working_shape_and_iterations(requested_shape, max_power_of_two=13):
"""Returns the necessary size for a square grid which is usable in a DS algorithm.
The Diamond Square algorithm requires a grid of size n x n where n = 2**x + 1, for any
integer value of x greater than two. To accomodate a requested map size other than these
dimensions, we simply create the next largest n x n grid which can entirely contain the
requested size, and return a subsection of it.
This method computes that size.
PARAMETERS
----------
requested_shape
A 2D list-like object reflecting the size of grid that is ultimately desired.
max_power_of_two
an integer greater than 2, reflecting the maximum size grid that the algorithm can EVER
attempt to make, even if the requested size is too big. This limits the algorithm to
sizes that are manageable, unless the user really REALLY wants to have a bigger one.
The maximum grid size will have an edge of size (2**max_power_of_two + 1)
    RETURNS
    -------
    A tuple ((n, n), x), where n is the grid edge length described above and x is
    the power of two used to produce it.
"""
if max_power_of_two < 3:
max_power_of_two = 3
largest_edge = max(requested_shape)
for power in range(1, max_power_of_two+1):
d = (2**power) + 1
if largest_edge <= d:
return (d, d), power
#failsafe: no values in the dimensions array were allowed, so print a warning and return
# the maximum size.
d = 2**max_power_of_two + 1
print("DiamondSquare Warning: Requested size was too large. Grid of size {0} returned""".format(d))
return (d, d), max_power_of_two
|
c5f61347c17cc584d68dc6cccae7cd78c4a15906
| 695,689 |
def unmap_from_unit_interval(y, lo=0., hi=1.):
""" Linearly map value in [0, 1] to [lo_val, hi_val] """
return y * (hi - lo) + lo
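# Usage sketch: map the midpoint of [0, 1] into [10, 20]
assert unmap_from_unit_interval(0.5, lo=10.0, hi=20.0) == 15.0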
|
35a6dfbad855f80fa2034eb88cb41fcedc9a00d5
| 695,691 |
def parse_raw(sqlContext, input, user):
"""Parses the raw json for a user"""
df = sqlContext.read.json(input + "/" + user + "/" + user + ".json", multiLine=True)
return df
|
f8ff9544e222d92aa3825d9739ddec7d5970e8e5
| 695,693 |
def get_run_id_keys(json_results, run_id_list):
"""This function finds the key used in the json_results dictionary
    for a given run ID. These dictionary keys are the file names and
there are potentially many keys that are associated with a run ID.
For the intended purposes of this function (metadata comparison),
any file associated with the run ID will have the same metadata as
any other file associated with the run ID; it doesn't matter which
one we use.
Args:
json_results (dict) - benchmark results
run_id_list (list) - List of strings of the run-IDs being compared
Returns:
run_id_keys (dict) - Dictionary relating run-IDs (five character
unique IDs) and the key for said run ID in the json_results
dictionary.
"""
run_id_keys = {}
for run_id in run_id_list:
for key in json_results:
if run_id in key:
if run_id not in run_id_keys:
run_id_keys[run_id] = key
break
return run_id_keys
|
c3550508576fe595b01c2569c64d889f2edf07dc
| 695,696 |
def make_uniform(planes_dict, uniques, padding):
""" Ensure each section has the same number of images
This function makes the output collection uniform in
the sense that it preserves same number of planes across
sections. It also captures additional planes based
on the value of the padding variable
Args:
planes_dict (dict): planes to keep in different sections
uniques (list): unique values for the major grouping variable
padding (int): additional images to capture outside cutoff
Returns:
dictionary: dictionary containing planes to keep
"""
# max no. of planes
max_len = max([len(i) for i in planes_dict.values()])
# max planes that can be added on each side
min_ind = min([min(planes_dict[k]) for k in planes_dict])
max_ind = max([max(planes_dict[k]) for k in planes_dict])
max_add_left = uniques.index(min_ind)
max_add_right = len(uniques) - (uniques.index(max_ind)+1)
# add planes in each section based on padding and max number of planes
for section_id, planes in planes_dict.items():
        len_to_add = max_len - len(planes)
        len_add_left = min(len_to_add // 2 + padding, max_add_left)
        len_add_right = min(len_to_add - len_add_left + padding, max_add_right)
left_ind = int(uniques.index(min(planes)) - len_add_left)
right_ind = int(uniques.index(max(planes)) + len_add_right)+1
planes_dict[section_id] = uniques[left_ind:right_ind]
return planes_dict
|
8f67f7226dcf8846707f9d190eb9b15ccb1b27e9
| 695,700 |
import contextlib
import wave
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate == 16000
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
|
6c2aa73a313147f99992a2d2523a1ce1d7c5252b
| 695,703 |
def calc_install(runs, cost_index, system_index, tax_rate):
"""
Calculate total install cost for a manufacturing job.
:param runs: Number of runs in the job.
:param cost_index: Cost Index for the item.
:param system_index: Cost Index for the star system where construction
would occur.
:param tax_rate: Tax rate for the facility where construction would occur.
Rate should be represented as a percent. (e.g. the 10% of cost
index tax charged in NPC stations is given as 10)
:return:
"""
job_fee = runs * float(cost_index) * float(system_index)
facility_tax = job_fee * float(tax_rate) / 100
return job_fee + facility_tax
|
bf0b9da1f07e6e10008f4a1e1e498d1d3d6845bd
| 695,719 |
def end_game(game, *args):
"""Ends the game."""
end_message = args[0]
print(end_message)
return True
|
71b1aaa0d5c24e5a16c7f6acd2226a5042401f0e
| 695,721 |
def is_boundary(horizon, x):
"""
    Function which marks displacement boundary constrained particles:
    2 is no boundary condition (the number here is an arbitrary choice),
    -1 is displacement loaded in the -ve direction,
    1 is displacement loaded in the +ve direction,
    0 is clamped boundary.
"""
# Does not live on a boundary
bnd = 2
# Does live on boundary
if x[0] < 1.5 * horizon:
bnd = -1
elif x[0] > 1.0 - 1.5 * horizon:
bnd = 1
return bnd
|
0e8252be537ab72b2a469e2d3eadb0d1336b6326
| 695,722 |
from math import floor, log10
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{1}f}\times$".format(coeff,precision)\
+ "10" + r"$^{{{0:d}}}$".format(exponent)
|
4990887697d86a5ce5453f289c559991652c5269
| 695,723 |
import re
def split_hostname_index(name, default=None):
"""
Split the index out of a hostname.
E.g. splits "test^1" into ("test", 1). If no index is present, returns
(name, default).
"""
match = re.match(r'^(.*)~(\d+)$', name)
if match:
return match.group(1), int(match.group(2))
else:
return name, default
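# Usage sketch (illustrative values):
assert split_hostname_index("test~1") == ("test", 1)
assert split_hostname_index("test") == ("test", None)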
|
eb4d036e0cdc96d43d1a23946a56f48f88d887c8
| 695,725 |
from datetime import datetime
def footer_datetime(timestamp: str) -> datetime:
"""Takes an embed timestamp and returns a timezone-aware datetime object."""
return datetime.fromisoformat(timestamp)
|
a7c26eb6c4855af55eaaaee1f28a46d9a14934bf
| 695,726 |
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The 16-byte packed integer address in network (big-endian) order.
Raises:
ValueError: If address is negative or too large for an IPv6 address.
"""
try:
return address.to_bytes(16, 'big')
except OverflowError:
raise ValueError('Address negative or too large for IPv6')
|
5f4bb8cbed9f1cddace9c0257efc6f5ec2e2d78d
| 695,732 |
def tokenizer(x):
""" Trivial tokenizer """
return x
|
49b9559363def5686c254164d9038087c29d3e66
| 695,735 |
def binary_to_decimal(binary):
"""Converts a binary number(str) into a decimal(int)"""
    reversed_binary = binary[::-1]
    # index i corresponds to the power of 2 once the string is reversed
    decimal = 0  # running sum
    for i, value in enumerate(reversed_binary):
        if value == "0":
            continue  # a 0 bit contributes nothing
        decimal += 2**i  # add 2 raised to the bit's position, i
    return decimal
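# Usage sketch: 1011 in binary is 8 + 2 + 1
assert binary_to_decimal("1011") == 11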
|
c5a25cc9594f52e886b1b2a7d10667de709e5d0b
| 695,742 |
def short_hex(value):
"""
Convert to a nice hex number without the 0x in front to save screen space
:param value: value
:return: short string hex representation
"""
hex1 = hex(value)
hex2 = hex1[2:]
    if len(hex2) == 1:
        hex2 = "0" + hex2
return hex2
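# Usage sketch (illustrative values):
assert short_hex(10) == '0a'
assert short_hex(255) == 'ff'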
|
8d8c0b28c02dc3ba6a13c12fb4e44cb3e1229c7c
| 695,743 |
def find_subtractive_combinations(string):
"""
Finds subtractive combination pairs in string.
PARAMETERS:
string : str
RETURNS: ( (str pair, int index), ... )
Tuple containing all ordered subtractive combination pairs found and the respective index at which they start
"""
ivxlcdm = ["I", "V", "X", "L", "C", "D", "M"]
subtractive_pairs = []
previous_char = "M" # Max char (first case always goes through)
count = 0
for char in string:
if char not in ivxlcdm[:ivxlcdm.index(previous_char) + 1]: # char <= previous_char
subtractive_pairs.append((previous_char + char, count-1))
previous_char = char
count += 1
return tuple(subtractive_pairs)
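# Usage sketch: "XIV" contains the subtractive pair "IV" starting at index 1
assert find_subtractive_combinations("XIV") == (('IV', 1),)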
|
b3fdc589e98d69ac4af9be219853a8e82f24a43a
| 695,744 |
def swap_columns(a, b, array):
"""
Function that swaps columns of a given matrix
:param a: int
:param b: int
:param array: numpy array
:return: array_swapped: numpy array with the columns swapped
"""
array_swapped = array.copy()
array_swapped[:, a] = array[:, b]
array_swapped[:, b] = array[:, a]
return array_swapped
|
40b848b24c6047d6faf7f07c0e9a5e18d78101fc
| 695,745 |
import string
import random
def random_string(length):
"""Generate random string of the given length."""
pool = string.ascii_letters + string.digits
return "".join(random.choice(pool) for _ in range(length))
|
f5e83dd2215d708b0ce5f8bd3e344b8fed12277d
| 695,746 |
def parse_zone_groups(player):
"""Creates a list of all Zones with attrbute
whether they are a group or a single player"""
all_zones = []
for group in player.all_groups:
if len(group.members) > 1:
all_zones.append({"kind":"G", "master":group.coordinator})
else:
all_zones.append({"kind":"P", "master":group.coordinator})
return all_zones
|
c5074a3b88f661dcc0e310e2447071348ecf346f
| 695,748 |
def get_default_memory_overhead(memory):
"""
The default memory overhead (related to both driver and executor) depends on how much memory is used: 0.10*memory,
with minimum of 384 MB.
:param memory: driver or executor memory
:return: default memory overhead
"""
MINIMUM_OVERHEAD = 384 << 20 # 384 MB
    return max(0.1 * memory, MINIMUM_OVERHEAD)
|
25db1cfe67651cf0b32371e2d8a056b5215e264a
| 695,749 |
def _difference(idxs):
""" Returns the chained difference of the indexes given.
Parameters
----------
idxs : list
List of pandas.Index objects.
Returns
-------
idx : pandas.Index
The result of the chained difference of the indexes
given.
"""
idx = idxs[0]
for idx_part in idxs[1:]:
idx = idx.difference(idx_part)
return idx
|
d48e491f0c145d4286bf6c7675e0b511d39b87a4
| 695,752 |
def load_stopwords( inpath = "text/stopwords.txt" ):
"""
Load stopwords from a file into a set.
"""
stopwords = set()
with open(inpath) as f:
lines = f.readlines()
for l in lines:
l = l.strip()
if len(l) > 0:
stopwords.add(l)
return stopwords
|
5e733e97a3f56867d80edeb3db5a392362a23fe1
| 695,754 |
def ppr(b2, b3):
"""
Plant Pigment Ratio (Metternicht, 2003).
.. math:: PPR = (b3 - b2)/(b3 + b2)
:param b2: Blue.
:type b2: numpy.ndarray or float
:param b3: Green.
:type b3: numpy.ndarray or float
:returns PPR: Index value
.. Tip::
Metternicht, G. 2003. Vegetation indices derived from high-resolution \
airborne videography for precision crop management. International \
Journal of Remote Sensing 24(14), 2855-2877. \
doi:10.1080/01431160210163074
"""
PPR = (b3 - b2)/(b3 + b2)
return PPR
|
77a071d3c437dc3f202f6c7a35147c7473e17749
| 695,756 |
def get_file_extension(file_name):
"""e.g.: "/home/j/path/my.video.mp4" -> ".mp4"
Throws an exception, ValueError, if there is no "." character in file_name
:param file_name: <str> any string or path that is the name of a file
:return: the file extension of the param
"""
return file_name[file_name.rindex('.'):]
|
03789c21b6478f8cfa2707697e74f8d51995923b
| 695,764 |
def pg_utcnow(element, compiler, **kw):
"""
Postgres UTC timestamp object
"""
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
|
3deb31b98b8c75417ff0ecf5c7b5fa9eb0b91df9
| 695,769 |
def assign_value_if_none(value, default):
"""
Assign a value to a variable if that variable has value ``None`` on input.
Parameters
----------
value : object
A variable with either some assigned value, or ``None``
    default : object
        The value to assign to the variable ``value`` if
        ``value is None`` returns ``True``
Returns
-------
new_value : object
The new value of ``value``
"""
return default if value is None else value
|
5034a65741bb763e632bb8493eb661229e75279a
| 695,770 |
def get_dict_val(dictionary, path, splitchar='/'):
"""
    Return the value of the dictionary at the given path of keys.
    ----------
    INPUT
    |---- dictionary (dict) the dict to search in.
    |---- path (str) path-like string for key access ('key1/key2/key3' for ex.).
    |---- splitchar (str) the character to use to split the path into keys.
    OUTPUT
    |---- dictionary (dict or value) the value of the dictionary at the
    |       provided key path.
"""
for item in path.split(splitchar):
dictionary = dictionary[item]
return dictionary
|
eb58e42edc705f05d9e046e65f4ed823f42c9aac
| 695,771 |
def find_non_base_case_job(jobs):
"""Return a job that is not a base case."""
for job in jobs:
if job.model.base_case is not None:
assert not job.model.is_base_case
return job
raise Exception("Did not find a non-base-case job")
|
ab80ca1ad2e5293876ffa7973112bf19ae8ab308
| 695,775 |
def _get_new_steplist(reqs_to_keep, old_step_data, req_ids):
"""Returns a list similar to `old_step_data` but with unwanted requests removed.
Uses the requests and request components in `old_step_data` and the entitiy ids in `req_ids` to determine which elements in `old_step_data` to keep.
Parameters
----------
reqs_to_keep : dict
Dictionary of requests and request components to keep
old_step_data : list
List of all the step data in the results file
req_ids : dict
Dictionary of entity ids for the entire results file
Returns
-------
list
List of just the step data to keep
"""
# Start a new list with just the time element from `old_step_data`
new_step_data = [old_step_data[1]]
# Loop through the desired requests and components to pull elements
# from `old_step_data` into `new_step_data`
for request in reqs_to_keep:
# For each desired request
for req_comp in reqs_to_keep[request]:
# For each desired request component, add that components
# step data to `new_step_data`
req_id = int(req_ids[request][req_comp])
new_step_data.append(old_step_data[req_id])
return new_step_data
|
61e3c88dda3fae29a10c91b4abfc02ed4762f22e
| 695,776 |
def slon_e(lon_e, precision=0):
"""East longitude string.
Parameters
----------
lon_e: float
Input east longitude (degE).
precision: int, optional
Displayed float precision.
Returns
-------
str
        Formatted longitude (`180° | 90°W | 0° | 90°E | 180°`)
"""
return (f'{abs(lon_e):.{precision}f}°'
f'{"" if abs(lon_e % 180) <= 1.e-2 else "E" if lon_e > 0 else "W"}')
|
1968def96ac276e23e19991672653acdc00d65d2
| 695,777 |
from math import log
def is_power_of_two(x):
"""
Returns true if x is a power of two, false otherwise.
"""
    if x <= 0:
        return False
    # round() guards against float error in log(); bare int() truncation can
    # misclassify large exact powers of two (e.g. log(2**29, 2) -> 28.999...).
    log2 = int(round(log(x, 2)))
    return x == 2 ** log2
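# Usage sketch (illustrative values):
assert is_power_of_two(1024) is True
assert is_power_of_two(1000) is False
assert is_power_of_two(2**29) is True  # could fail with plain int() truncation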
|
092c13924e076c31c85ff93906c0947b61708c5a
| 695,779 |
import pathlib
def make_report_file_names(proj_full_path):
"""
make the directory and file names for a report
Args:
proj_full_path (string): the path of the results directory
Returns:
report_dir (pathlib.Path)
html_outfile (pathlib.Path)
hash_file (pathlib.Path)
"""
report_dir = pathlib.Path(proj_full_path).joinpath("report")
html_outfile = report_dir.joinpath("report.html")
hash_file = report_dir.joinpath("results_hash.json")
return (report_dir, html_outfile, hash_file)
|
8d995bb15c2b8710ad2fb16e2476b5a96421f379
| 695,780 |
import time
import math
def __project_gdf(gdf, to_crs=None, to_latlong=False):
"""
Project a GeoDataFrame to the UTM zone appropriate for its geometries'
centroid.
The simple calculation in this function works well for most latitudes, but
won't work for some far northern locations like Svalbard and parts of far
northern Norway.
Parameters
----------
gdf : GeoDataFrame
the gdf to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, projects to latlong instead of to UTM
Returns
-------
GeoDataFrame
"""
default_crs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
start_time = time.time()
# if gdf has no gdf_name attribute, create one now
if not hasattr(gdf, 'gdf_name'):
gdf.gdf_name = 'unnamed'
# if to_crs was passed-in, use this value to project the gdf
if to_crs is not None:
projected_gdf = gdf.to_crs(to_crs)
# if to_crs was not passed-in, calculate the centroid of the geometry to
# determine UTM zone
else:
if to_latlong:
# if to_latlong is True, project the gdf to latlong
latlong_crs = default_crs
projected_gdf = gdf.to_crs(latlong_crs)
# log('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
else:
# else, project the gdf to UTM
# if GeoDataFrame is already in UTM, just return it
if (gdf.crs is not None) and ('+proj=utm ' in gdf.crs.to_string()):
return gdf
# calculate the centroid of the union of all the geometries in the
# GeoDataFrame
avg_longitude = gdf['geometry'].unary_union.centroid.x
# calculate the UTM zone from this avg longitude and define the UTM
# CRS to project
utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
utm_crs = '+proj=utm +zone={} +ellps=WGS84 +datum=WGS84 +units=m +no_defs'.format(utm_zone)
# project the GeoDataFrame to the UTM CRS
projected_gdf = gdf.to_crs(utm_crs)
# log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))
projected_gdf.gdf_name = gdf.gdf_name
return projected_gdf
|
6b3e67285ff9229fb96d609ffd222296dc1d7ae2
| 695,781 |
import re
def is_valid_rvpsa_id(rvpsa_id):
"""
Validates a remote VPSA ID, also known as the remote VPSA "name". A valid
remote VPSA name should look like: rvpsa-00000001 - It should always start
with "rvpsa-" and end with 8 hexadecimal characters in lower case.
:type rvpsa_id: str
:param rvpsa_id: The remote VPSA name to be validated.
:rtype: bool
:return: True or False depending on whether rvpsa_id passes validation.
"""
if rvpsa_id is None:
return False
match = re.match(r'^rvpsa-[0-9a-f]{8}$', rvpsa_id)
if not match:
return False
return True
|
aabe9e64dbc9003f3cc5f18842b91f4a3e34c8a8
| 695,782 |
def compute_prior_probability(alpha):
"""
Calculate the probability of the node being a LeafNode (1 - p(being SplitNode)).
Taken from equation 19 in [Rockova2018].
Parameters
----------
alpha : float
Returns
-------
list with probabilities for leaf nodes
References
----------
.. [Rockova2018] Veronika Rockova, Enakshi Saha (2018). On the theory of BART.
arXiv, `link <https://arxiv.org/abs/1810.00787>`__
"""
prior_leaf_prob = [0]
depth = 1
while prior_leaf_prob[-1] < 1:
prior_leaf_prob.append(1 - alpha**depth)
depth += 1
return prior_leaf_prob
|
664734536a8a973bf77e6d9e723dc2954f663e21
| 695,786 |
def lat_opposite(side):
"""
Returns the lateral opposite as defined by the keyword pair {"Right", "Left"}
"""
if side == 'Right': return 'Left'
elif side == 'Left': return 'Right'
else: raise ValueError("Lateral side error, (%s)" % side)
|
2273b4e43e37fd206cac52d81591afa12ecf68ee
| 695,787 |
def count_col_nans(col):
"""
Returns the number of NaNs of a specific column in the dataset.
Parameters:
col (pandas Series): Column in the dataset
Returns:
    col_count_nans (int): Count of NaNs in col
"""
col_count_nans = col.isna().sum()
return col_count_nans
|
ea055c003805112dbebd11fa5b9beea8ecc4c127
| 695,790 |
def parse_headers(env):
"""Parse HTTP headers out of a WSGI environ dictionary
Args:
env: A WSGI environ dictionary
Returns:
A dict containing (name, value) pairs, one per HTTP header
Raises:
KeyError: The env dictionary did not contain a key that is required by
PEP-333.
TypeError: env is not dictionary-like. In other words, it has no
attribute '__getitem__'.
"""
# Parse HTTP_*
headers = {}
for key in env:
if key.startswith('HTTP_'):
headers[key[5:]] = env[key]
# Per the WSGI spec, Content-Type is not under HTTP_*
if 'CONTENT_TYPE' in env:
headers['CONTENT_TYPE'] = env['CONTENT_TYPE']
# Per the WSGI spec, Content-Length is not under HTTP_*
if 'CONTENT_LENGTH' in env:
headers['CONTENT_LENGTH'] = env['CONTENT_LENGTH']
# Fallback to SERVER_* vars if the Host header isn't specified
if 'HOST' not in headers:
host = env['SERVER_NAME']
port = env['SERVER_PORT']
if port != '80':
host = ''.join([host, ':', port])
headers['HOST'] = host
return headers
|
31c2d2eac9a888535d57ecaf57c91748173bd948
| 695,791 |
def get_node_proto(graph_proto, node_name):
"""Get a `NodeProto` from `GraphProto` by node name.
Args:
graph_proto: A `GraphProto`.
node_name: Name of the node.
Returns:
A `NodeProto` or None.
"""
for node_proto in graph_proto.nodes:
if node_proto.name == node_name:
return node_proto
return None
|
ddc1aebeb3450de8dd6dfa85b988997a163601b4
| 695,798 |
import warnings
def warn(action):
"""Set warnings filter"""
warnings.simplefilter(action)
return action
|
143fb081685769b9d189c1600ca6a51c4a084d70
| 695,799 |
def fetch_project(api, project_name=None, project_id=None):
"""
fetch a project from the sb api.
:param api: API object generated by sevenbridges.Api()
:type api: Sevenbridges API Object
:param project_name: name of a project to return e.g. 'forsure'
:type project_name: string
:param project_id: username/project name pair - e.g. 'doesnotexist/forsure'
:type project_id: string
:return: Project object from the sevenbridges api
:rtype: sevenbridges.models.project.Project
"""
if project_id:
project = api.projects.get(id=project_id)
if not project.id:
print(
f"""Project {project_id} not found. Check spelling
(especially trailing spaces)"""
)
raise KeyboardInterrupt
else:
return project
elif project_name:
project_list = api.projects.query(name=project_name)
if not project_list:
print(
f"""Project {project_name} not found. Check spelling
(especially trailing spaces)"""
)
raise KeyboardInterrupt
else:
return project_list[0]
else:
print("No project passed.")
raise KeyboardInterrupt
|
f33d70367bd4a52ae0099a83110182afdb8862d5
| 695,800 |
def filter_any_answer(group):
"""Filter questions answered by anyone in group."""
answers = set()
for person in group:
for question in person:
answers.add(question)
return answers
|
5dca4f80e069bb3e9d9541145cbbc18eb22daf3f
| 695,804 |
from typing import Iterable
def clean_key(k: Iterable[str]) -> str:
"""
Utility function for formatting keys.
This is a no-op if the input is a string, otherwise expects an iterable
of strings, which it joins with a period.
"""
return k if isinstance(k, str) else '.'.join(k)
|
78f241056141a2549ae981a7ab18268e23fd2b0a
| 695,807 |
def getFieldShortName(field_name):
""" Simplifies `field_name` in the exported dataframe by removing Watson Assistant prefixes """
return field_name.replace('request.','').replace('response.','').replace('context.system.','').replace('context.','')
|
42c05ebef5d6ec0fe23ffa789f67a8aa37f364bd
| 695,809 |
def hyperlink_title(body, docpath, docname):
"""
Hyperlink titles by embedding appropriate a tag inside
h1 tags (which should only be post titles).
"""
body = body.replace("<h1>", '<h1><a href="%s.html">' %
(docpath + docname), 1)
body = body.replace("</h1>", "</a></h1>", 1)
return body
|
ae316226ef64a45c97cd6d094617edc2624d1cc8
| 695,810 |
import toml
def get_version(poetry="pyproject.toml") -> str:
"""Get the version of the package from pyproject file"""
with open("pyproject.toml", "r", encoding="utf-8") as f:
data = toml.loads(f.read())
return data["tool"]["poetry"]["version"].strip()
|
56c4d658dbca656bd5964080036c9ca3df079f0d
| 695,817 |
def cp_lt_stage_aware(cp0, cp1):
""" Less-than comparison of two CriticalPath objects in stage-aware mode """
if cp0.stage is None:
# CriticalPath with None stage is always shorter than any other
# critical path
return True
elif cp1.stage is None:
return False
return (cp0.stage < cp1.stage) or (cp0.stage == cp1.stage and cp0.comb_latency < cp1.comb_latency)
|
783ee58a893bb52cd515605728ab4df743661052
| 695,818 |
def mul(value, arg):
"""Multiplication
>>> mul(2, 2)
4
"""
return value * arg
|
50e8f39e52c754c8448f5ce32c040e3e5106af75
| 695,822 |
from typing import Optional
from typing import Dict
def get_first_api_gateway(api_gateway_client, api_gateway_name: str) -> Optional[Dict]:
"""
Get the first API Gateway with the given name. Note, that API Gateways can have the same name.
They are identified by AWS-generated ID, which is unique. Therefore this method lists all API
Gateways and returns the first one with matching name. If no matching name is found, None is returned.
Args:
api_gateway_client: API Gateway V2 Client.
api_gateway_name: Name of the API Gateway function.
Returns: Either a dictionary containing the get_api response, or None if it doesn't exist
"""
response = api_gateway_client.get_apis()
apis = response.get("Items", [])
# Limit the number of times we page through the API.
for _ in range(10):
# Try finding the match before getting the next batch of api gateways from AWS
for api in apis:
if api.get("Name") == api_gateway_name:
return api
# Break out of the loop if there's no next batch of api gateways
next_token = response.get("NextToken")
if not next_token:
break
# Get the next batch of api gateways using next_token
response = api_gateway_client.get_apis(NextToken=next_token)
apis = response.get("Items", [])
# Return None if API Gateway with such name was not found
return None
|
fee768f319f2670ecaf9f8c6c66fffc62bcd66f3
| 695,823 |
def is_sorted(items):
"""Return a boolean indicating whether given items are in sorted order.
Running time: O(n) because at most loop through the entire array
Memory usage: O(1) because not creating any new space and everything is done in place"""
for i in range(len(items) - 1):
# if next item is smaller than current, then list not sorted
if items[i+1] < items[i]:
return False
return True
|
59094cd421c104509e54d7c8a40e27b4fcb97d63
| 695,825 |
def remove_empty(data):
"""Removes empty items from list"""
out = []
for item in data:
if item == '':
continue
out.append(item)
return out
|
9ef46381bb76846c92375f47eb481a26359b1d92
| 695,827 |
import re
def MatchPattern(file_path, pattern):
"""Looks for matching specified pattern in the file line by line.
Args:
file_path: file path.
pattern: pattern for matching.
Returns:
the match or None.
"""
try:
with open(file_path, "r") as f:
prog = re.compile(pattern)
for line in f:
match = prog.match(line)
if match:
return match
    except Exception:
        # IOError and any other failure alike: fall through and return None
        pass
return None
|
1ab4f7cf675c3be72bd6b01fb6f6c7fee2668275
| 695,830 |
def rem_item_from_list(item, string):
""" Removes all occurrences of token from string. If no occurrences of
items are in string, nothing is removed."""
return string.replace(item, "")
|
5b0406c57aed3b786c4f20501be80e18f945928f
| 695,833 |
from bs4 import BeautifulSoup
def strip_html(string: str):
"""
Use BeautifulSoup to strip out any HTML tags from strings.
"""
return BeautifulSoup(string, "html.parser").get_text()
|
796fc52ddd303906c7fd217275cb2a897e76767c
| 695,836 |
def email_sort(email):
""" Split the given email address into a reverse order tuple, for sorting i.e (domain, name) """
return tuple(reversed(email[0].split('@')))
|
d841ea1f468d11e89df5d493ac74c28705bd6c27
| 695,837 |
from typing import List
import math
def euclidian_distance(a: List[float], b: List[float]) -> float:
""" Returns the euclidian distance between two N-dimensional points """
return math.sqrt(sum((x - y) ** 2 for (x, y) in zip(a, b)))
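# Usage sketch: the classic 3-4-5 right triangle
assert euclidian_distance([0.0, 0.0], [3.0, 4.0]) == 5.0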
|
57aac940e12c64978c7ecc4aea567653d6ee780a
| 695,838 |
def cu_gene(obj):
"""Extracts the gene name from a codon usage object in std. format"""
return str(obj.Gene).lower()
|
5df9facb6de7b954efe8e273f83c4f9cc9b2725a
| 695,840 |
import json
def load_config(config_filename):
"""
Load the population config for this simulation.
Args:
config_filename (str): Filename for the simulation's config.
Returns:
Dict containing data from the config file.
"""
if not config_filename:
return {}
config_data = []
with open(config_filename) as config_file:
config_data = json.loads(config_file.read())
if not config_data:
raise RuntimeError("No Config Loaded: {}".format(config_filename))
return config_data
|
fffa9ceade1f83ff142da2b8de6484647e165dd8
| 695,841 |
import torch
def extract_slice_from_mri(image, index_slice, view):
"""
    This is a function to grab one slice in each view and create an RGB image for transfer learning: duplicate the slice into the R, G and B channels
:param image: (tensor)
:param index_slice: (int) index of the wanted slice
:param view:
:return:
To note, for each view:
Axial_view = "[:, :, slice_i]"
Coronal_view = "[:, slice_i, :]"
Sagittal_view= "[slice_i, :, :]"
"""
# reshape the tensor, delete the first dimension for slice-level
image_tensor = image.squeeze(0)
# sagittal
if view == 0:
slice_select = image_tensor[index_slice, :, :].clone()
# coronal
elif view == 1:
slice_select = image_tensor[:, index_slice, :].clone()
# axial
elif view == 2:
slice_select = image_tensor[:, :, index_slice].clone()
else:
raise ValueError("This view does not exist, please choose view in [0, 1, 2]")
extracted_slice = torch.stack((slice_select, slice_select, slice_select))
return extracted_slice
|
84b6120aab497f03347f5a76ba7a42abe8bb4741
| 695,843 |
def get_buildings_in_buffer(buf, buildings, ids, idx):
"""
Input the buffer polygon and building geometries to check
if the building intersects with the buffer. Return all
buildings within the buffer (based on ID). An R-tree
is used to speed up things.
"""
bld_in_buffer = {}
for i in idx.intersection(buf.bounds):
if buf.intersects(buildings[i]):
bld_in_buffer[ids[i]] = buildings[i]
return bld_in_buffer
|
ffb55879125997f824965998999225311c221c33
| 695,844 |
def needs_escaping(text: str) -> bool:
"""Check whether the ``text`` contains a character that needs escaping."""
    # Characters that must be escaped inside a double-quoted string literal
    special = {"\a", "\b", "\f", "\n", "\r", "\t", "\v", '"', "\\"}
    for character in text:
        if character in special:
            return True
    return False
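# Usage sketch (illustrative values):
assert needs_escaping('line\nbreak') is True
assert needs_escaping('plain text') is False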
|
43110dacad107ab835e76997edc56bba113694b8
| 695,854 |
import torch
def _update_mem(inp_tokens, memory):
"""This function is for updating the memory for transformer searches.
    it is called at each decoding step. When called, it appends the
    predicted token of the previous step to the existing memory.
Arguments:
-----------
inp_tokens : tensor
Predicted token of the previous decoding step.
memory : tensor
Contains all the predicted tokens.
"""
if memory is None:
return inp_tokens.unsqueeze(1)
return torch.cat([memory, inp_tokens.unsqueeze(1)], dim=-1)
|
ef4dc9ed0fd32d207cf584a3c180bf0bb3d6082f
| 695,856 |
def CalculateBoxSize(nmol, molwt, density):
"""
Calculate the size of a solvent box.
Parameters
----------
nmol : int
Number of molecules desired for the box
molwt : float
Molecular weight in g/mol
density : float
Estimated density in kg/m3 (this should be about 40-50% lower than the real liquid density)
Returns
-------
float
Length of a cubic solvent box in nm.
"""
# Calculate total mass of the box in kg
mass = nmol * molwt / 1000 / 6.022e23
volume = mass / density
length = volume**(1./3)/1e-9
return length
|
9508b740c07edd78e33be9cf6f30414e32ab8953
| 695,858 |
def tril_count_from_matrix_dim(matrix_dim: int):
"""Computes the number of lower triangular terms in a square matrix of a given
dimension `(matrix_dim, matrix_dim)`.
Args:
matrix_dim (int): Dimension of square matrix.
Returns:
int: Count of lower-triangular terms.
"""
tril_count = (matrix_dim ** 2 - matrix_dim) // 2 + matrix_dim
return tril_count
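# Usage sketch: a 3x3 matrix has 6 lower-triangular terms (3 diagonal + 3 below)
assert tril_count_from_matrix_dim(3) == 6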
|
d1e350986a09c239959de77b821003b5189f6c98
| 695,859 |
def sgd(l_rate, parameters, grads):
"""
Stochastic Gradient Descent.
Parameters
----------
    :type l_rate: theano.tensor.scalar
    :param l_rate: Initial learning rate
:type parameters: theano.shared
:params parameters: Model parameters to update
:type grads: Theano variable
:params grads: Gradients of cost w.r.t to parameters
"""
updates = []
for param, grad in zip(parameters, grads):
updates.append((param, param - l_rate * grad))
return updates
|
aaa1d11788669801b4edd89aca29b38258043ff1
| 695,861 |