content | sha1 | id
---|---|---|
def weave(devicePairs):
"""
"""
routers = [x[0] for x in devicePairs if x[0][1] == "router.PNG"]
selected = []
for devicePair in devicePairs:
starterDevice = devicePair[0]
if starterDevice[1] == "router.PNG":
continue
starterPosition = maths.getCenter(tuple(starterDevice[0]))
distances = []
for (endPosition, endDevice) in devicePair[1:]:
distances.append(maths.getDistance(starterPosition, maths.getCenter(endPosition)))
#if starterDevice[1] == "router.PNG":
# distances[distances.index(min(distances))] = np.Infinity
closestIndex = distances.index(min(distances))
closestDevice = devicePair[closestIndex + 1]
selected.append((starterDevice, closestDevice))
return selected | 6e312f2c89007e67efdb23d93c103e3f7583d48a | 1,487 |
def change_image_ani(image: _Surface,
name: _Optional[str] = None,
id_: _Optional[int] = None) -> _TextureAni:
"""
    change_image_ani(image, name=None, id_=None)
Type: function
Description: returns a TextureAni that simply changes the image of
an AniElement
Args:
'image' (pygame.Surface): the image to change the element to
'name' (str?): the name of the animation, defaults to None
'id_' (int?): the ID of the animation, defaults to None
Return type: TextureAni
"""
return _TextureAni(
name=name,
frames=[image],
time=0,
id_=id_,
reset_on_end=False
) | ff68e741937512d70ad714e54df037940154467f | 1,488 |
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
) from err | 06a014f531944eb0f5d428e5f2880a1e91de797c | 1,489 |
def read_u16(f):
"""Reads a two byte unsigned value from the file object f.
"""
temp = f.read(2)
if not temp:
raise EOFError("EOF")
return int.from_bytes(temp, byteorder='little', signed=False) | 03478ce0fd4076ca3a0c4ea2f687cca254ba7052 | 1,490 |
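# Added usage sketch (not part of the dataset row): read_u16 works with any binary
# file-like object, e.g. io.BytesIO. Bytes are little-endian, so b'\x01\x02' -> 513.
import io

buf = io.BytesIO(b'\x01\x02')
assert read_u16(buf) == 513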
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num) | 2862a3a2c456fcfe5461f011a28bdf5ec94971a8 | 1,491 |
def process_image(sample, settings, mode, color_jitter, rotate):
""" process_image """
mean = settings.image_mean
std = settings.image_std
crop_size = settings.crop_size
img_path = sample[0]
img = cv2.imread(img_path)
if mode == 'train':
if rotate:
img = rotate_image(img)
if crop_size > 0:
img = random_crop(
img, crop_size, settings, interpolation=settings.interpolation)
if color_jitter:
img = distort_color(img)
if np.random.randint(0, 2) == 1:
img = img[:, ::-1, :]
else:
if crop_size > 0:
target_size = settings.resize_short_size
img = resize_short(
img, target_size, interpolation=settings.interpolation)
img = crop_image(img, target_size=crop_size, center=True)
img = img[:, :, ::-1]
if 'use_aa' in settings and settings.use_aa and mode == 'train':
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = policy(img)
img = np.asarray(img)
img = img.astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return (img, sample[1])
elif mode == 'test':
return (img, ) | 230991f4078c9963731355b276a6e351f7bcbab9 | 1,492 |
def convert_pressures(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one pressure unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of pressure values to undergo unit conversion in situ, or a scalar
from_units (string): the units of the data before conversion
to_units (string): the required units
returns:
a after unit conversion
note:
To see supported units, use: `valid_uoms(quantity='pressure')`
"""
return convert(a, from_units, to_units, quantity = 'pressure', inplace = True) | d25ca383fe0cfaf6e756958ff99aebf2b06e13a9 | 1,494 |
def amovie(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#amovie"""
return filter(stream, amovie.__name__, *args, **kwargs) | 60daca8722bb42b34231a82dd3c9175108af8f9b | 1,495 |
import numpy
def kutta_condition(A_source, B_vortex):
"""
Builds the Kutta condition array.
Parameters
----------
A_source: 2D Numpy array of floats
Source contribution matrix for the normal velocity.
B_vortex: 2D Numpy array of floats
Vortex contribution matrix for the normal velocity.
Returns
-------
b: 1D Numpy array of floats
The left-hand side of the Kutta-condition equation.
"""
b = numpy.empty(A_source.shape[0]+1, dtype=float)
# matrix of source contribution on tangential velocity
        # is the same as
# matrix of vortex contribution on normal velocity
b[:-1] = B_vortex[0, :] + B_vortex[-1, :]
# matrix of vortex contribution on tangential velocity
# is the opposite of
# matrix of source contribution on normal velocity
b[-1] = - numpy.sum(A_source[0, :] + A_source[-1, :])
print(b)
return b | 0009018c39c21f1bc3b98745ea7a475f0a7e6fe7 | 1,496 |
def absorption_sinogram(p, anglelist):
"""Generates the absorption sinogram for absorption by the full
elemental content of the Phantom2d object.
Parameters
----------
p : Phantom2d object
anglelist : list of float
Ordered list of sinogram projection angles in degrees.
Returns
-------
array of float
Sinogram of requested scattering or fluorescence.
This is a 2d x-theta map of dimensionless values.
"""
sinogram = np.empty((p.cols, len(anglelist)))
if config.show_progress:
pbar = ProgressBar(maxval=max(1, len(anglelist)-1), term_width=80).start()
for i, angle in enumerate(anglelist):
if config.show_progress:
pbar.update(i)
increasing_ix = True # Set True to accumulate cmam along increasing y
n_map = irradiance_map(p, angle, n0=1.0, increasing_ix=increasing_ix)
if increasing_ix:
sinogram[:, i] = np.log(n_map[0] / n_map[-1])
else:
sinogram[:, i] = np.log(n_map[-1] / n_map[0])
return sinogram | 37f5048b9207221387c2410e2bb0be20bafc8dcf | 1,497 |
def trace_feature_vector_from_nodes(embeddings, traces, dimension):
"""
    Computes the average and maximum feature vectors for each trace
    Parameters
    -----------------------
    embeddings,
        Text-based model containing the computed encodings
    traces: List,
        List of traces treated as sentences by the model
    dimension: int,
        Size of the embedding vectors, used for the zero-vector fallback
    Returns
    -----------------------
    vectors_average: List
        list of average vector encodings, one per trace
    vectors_max: List
        list of element-wise maximum vector encodings, one per trace
"""
vectors_average, vectors_max = [], []
for trace in traces:
trace_vector = []
for token in trace:
try:
trace_vector.append(embeddings[token])
except KeyError:
pass
if len(trace_vector) == 0:
trace_vector.append(np.zeros(dimension))
vectors_average.append(np.array(trace_vector).mean(axis=0))
vectors_max.append(np.array(trace_vector).max(axis=0))
return vectors_average, vectors_max | 93efdf6da293bd6af61c1c77e8b19c76c6b71193 | 1,498 |
def jitter_rotate(drawing, sigma=0.2):
"""
Rotate an entire drawing about 0,0 by a random gaussian.
"""
rotation = np.random.randn(1) * sigma
matrix = create_rotation_matrix(rotation)
return [np.dot(stroke, matrix).squeeze() for stroke in drawing] | 058709f6a84e99fbd8899e3e6c4aed09b7c0ad6e | 1,499 |
def is_modified(filename: str) -> bool:
"""
Given a filename return if it has been modified
"""
global new_hashes
global old_hashes
if filename in old_hashes.keys():
if old_hashes[filename] == new_hashes[filename]:
return False
return True | f5f191a9fc714d0431d8c464630ab6b0c95f13dd | 1,500 |
def _is_url_without_path_query_or_fragment(url_parts):
"""
    Determines if a URL has a blank (or bare '/search') path, and no query string or fragment.
:param url_parts: A URL.
:type url_parts: :class:`urlparse.ParseResult`
"""
return url_parts.path.strip('/') in ['', 'search'] and url_parts.query == '' \
and url_parts.fragment == '' | 4bad1f230adfa77df019519db276a181d57682dd | 1,501 |
import math
def wgs84_distance(lat1, lon1, lat2, lon2):
"""Distance (in meters) between two points in WGS84 coord system."""
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = (math.sin(dLat / 2) * math.sin(dLat / 2) +
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
math.sin(dLon / 2) * math.sin(dLon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d | b700c218c172843922762b741f37b25996fdc047 | 1,503 |
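# Added usage sketch (not part of the dataset row). EARTH_RADIUS is not defined in
# the snippet; the mean Earth radius in metres is assumed here for illustration.
EARTH_RADIUS = 6371000  # metres (assumed)

# Helsinki to Tallinn city centres, on the order of 80 km.
d_m = wgs84_distance(60.1699, 24.9384, 59.4370, 24.7536)
print(round(d_m / 1000, 1), "km")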
def optimize_acq_func(acq_func: AcquisitionFunction, bounds=None, options=None):
"""Optimizes the acquisition function"""
# optimize
candidates, _ = optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=1,
num_restarts=20,
raw_samples=512,
options=options,
)
new_x = candidates.detach()
return new_x | 9aef150a89f646f8f65efe656a2987a7afe9f917 | 1,504 |
import json
def _recover_distributor(lb_id):
"""Get cached Distributor object or generate from ovs external_ids
{
'dist-lb-id': lb_id,
'dist-vip': vip,
'dist-size': size,
'dist-status': status,
'dist-mac': mac,
'dist-hash-fields': field-list,
'dist-ofport': ofport, # of external iface
'slot-100': 'amphora_id,mac',
'slot-101': 'amphora_id,mac',
'slot-...': 'amphora_id,mac',
}
"""
if _provision_state.state == DISTRIBUTOR_BOOTING:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Server status is %(status)s'
) % dict(lb=lb_id, status=_provision_state.state)
LOG.error(msg)
raise DistributorUsageError(msg)
if lb_id in _distributors:
return _distributors[lb_id]
ret, out, err = _run_vsctl(
VSCTL_FIND_EXTERNAL_ID.format(key='dist-lb-id',
value=lb_id),
extra_args=[VSCTL_JSON_FORMAT])
if ret != 0:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Find failed with exit_status=%(ret)d'
                '\nstderr=%(err)s'
) % dict(lb=lb_id, ret=ret, err=err)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
    # ovs json is a nested [type, value] list
# br_list = {'data': [[br_name,
# ['map',
# [['dist-lb-id', lb_id],
# ['dist-vip', vip],
# ['dist-size', size],
# ['dist-status', status],
# ['dist-mac', mac],
# ['dist-hash-fields', field-list],
# ['dist-ofport', ofport],
# ['slot-100', amphora_id,mac],
# ['slot-101', amphora_id,mac],
# ['slot-...', amphora_id,mac]]]]]
# 'headings': ['name', 'external_ids']}
try:
br_list = json.loads(out)
br_name = br_list['data'][0][0]
br_properties = dict(br_list['data'][0][1][1])
except (ValueError, KeyError, IndexError, TypeError):
msg = _('Error while recovering loadbalancer %(lb)s.'
' Could not parse find results %(out)s.'
) % dict(lb=lb_id, out=out)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
found_id = br_properties.pop('dist-lb-id', None)
if lb_id != found_id or len(br_list['data']) != 1:
msg = _('Error while recovering loadbalancer %(lb)s. None or'
' duplicate bridge found. out=%(out)s'
) % dict(lb=lb_id, out=br_list)
LOG.error(msg)
return None
# one error type for all property parsing issues, catch all
# expected errors
try:
vip = netaddr.IPAddress(br_properties.pop('dist-vip'))
size = int(br_properties.pop('dist-size'))
status = br_properties.pop('dist-status')
assert status in (ONLINE, DEGRADED, ERROR, NO_MONITOR)
mac = netaddr.EUI(br_properties.pop('dist-mac'),
dialect=netaddr.mac_unix)
iface = _interface_by_mac(mac)
hash_selection_fields = br_properties.pop(
'dist-hash-fields').split(',')
ofport = int(br_properties.pop('dist-ofport'))
except (AssertionError, KeyError, ValueError, UnicodeDecodeError,
AddrFormatError, TypeError, IndexError,
NotImplementedError, AddrConversionError, StopIteration):
# we have a bridge name so we should try to delete it
ret, out, err = _run_vsctl(VSCTL_DEL_BR.format(br_name))
killed = 'killed' if ret == 0 else 'kill failed: stderr=%s' % err
msg = _('Error while recovering loadbalancer %(lb)s.'
' bad bridge properties %(props)s.'
' Killing bridge %(kill_msg)s'
) % dict(lb=lb_id, props=br_properties, kill_msg=killed)
LOG.error(msg)
raise DistributorInstanceError(msg)
distributor = _Distributor(name=br_name, lb_id=lb_id, vip=vip,
mac=mac, iface=iface, size=size)
for slot in range(DST_GROUPS_OFFSET, DST_GROUPS_OFFSET + size):
slot_key = SLOT_KEY_FORMAT.format(slot)
if slot_key in br_properties:
amphora_id, amphora_mac = br_properties[slot_key].split(',')
# mac = netaddr.EUI(amphora_mac, dialect=netaddr.mac_unix)
distributor.destinations[amphora_id] = slot, amphora_mac
else:
distributor.free_slots.add(slot)
distributor.hash_selection_fields = hash_selection_fields
distributor.fail = (ERROR == status)
distributor.ofport = ofport
_distributors[lb_id] = distributor
return distributor | a97cb4843515cf83314044af72a91b344d475a2d | 1,505 |
def setup_dispatcher(dp):
"""
Adding handlers for events from Telegram
"""
# commands
dp.add_handler(CommandHandler("start", commands.command_start))
dp.add_handler(CommandHandler("help", commands.command_help))
# admin & mod commands
dp.add_handler(CommandHandler("admin", admin.admin_command))
dp.add_handler(CommandHandler("bot_stats", admin.bot_user_stats))
dp.add_handler(CommandHandler(f"{broadcast_command[1:]}", broadcast_command_with_message))
dp.add_handler(CommandHandler('add_mod', admin.add_moderator))
dp.add_handler(CommandHandler('remove_mod', admin.remove_moderator))
# conversations
pass
# callback queries
dp.add_handler(CallbackQueryHandler(broadcast_decision_handler, pattern=f"^{CONFIRM_DECLINE_BROADCAST}"))
return dp | 1b37f48a8e3f9cfe451edb321b20dbde88853a84 | 1,507 |
import re
import ast
def get_version():
"""Gets the current version"""
_version_re = re.compile(r"__VERSION__\s+=\s+(.*)")
with open("leaked/__init__.py", "rb") as init_file:
version = str(ast.literal_eval(_version_re.search(
init_file.read().decode("utf-8")).group(1)))
return version | a6c5a94ca3cb728af38075ac98105be6d82dd3cf | 1,508 |
import re
def dir_keys(path):
"""A function to take a path, and return a list of all the numbers in the path. This is
mainly used for sorting
by the parameters they contain"""
regex = '[-+]?[0-9]+(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?' # matching any floating point
m = re.findall(regex, path)
if(m): val = m
else: raise ValueError('Your path does not contain any numbers')
val = list(map(float,val))
return val | c2c32772771c9bae23a1fcc949a509eaaf36d602 | 1,509 |
def generate_data(n=5, T=1000, random_state=None, initial_data=None):
"""
Parameter
---------
n : int
number of variables
T : int
number of samples
random_state : int
seed for np.random.seed
    initial_data : dict
        dictionary of initial data with keys 'B0', 'B1' and 'causal_order'
"""
T_spurious = 20
expon = 1.5
if initial_data is None:
permutation = np.random.permutation(n)
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B0 = np.multiply(value, sign)
B0 = np.multiply(B0, np.random.binomial(1, 0.4, size=(n, n)))
B0 = np.tril(B0, k=-1)
B0 = B0[permutation][:, permutation]
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B1 = np.multiply(value, sign)
B1 = np.multiply(B1, np.random.binomial(1, 0.4, size=(n, n)))
causal_order = np.empty(len(permutation))
causal_order[permutation] = np.arange(len(permutation))
causal_order = causal_order.astype(int)
else:
B0 = initial_data['B0']
B1 = initial_data['B1']
        causal_order = initial_data['causal_order']
    M1 = np.dot(np.linalg.inv(np.eye(n) - B0), B1)
    ee = np.empty((n, T + T_spurious))
    for i in range(n):
        ee[i, :] = np.random.normal(size=(1, T + T_spurious))
        ee[i, :] = np.multiply(np.sign(ee[i, :]), abs(ee[i, :]) ** expon)
        ee[i, :] = ee[i, :] - np.mean(ee[i, :])
        ee[i, :] = ee[i, :] / np.std(ee[i, :])
    std_e = np.random.uniform(size=(n,)) + 0.5
    nn = np.dot(np.dot(np.linalg.inv(np.eye(n) - B0), np.diag(std_e)), ee)
    xx = np.zeros((n, T + T_spurious))
    xx[:, 0] = np.random.normal(size=(n, ))
    for t in range(1, T + T_spurious):
        xx[:, t] = np.dot(M1, xx[:, t - 1]) + nn[:, t]
    data = xx[:, T_spurious + 1 : T_spurious + T]
return data.T, B0, B1, causal_order | 5e5c09de44f6db1ba28cd953d6549bb8d31aa3ec | 1,510 |
from typing import List
import re
def _get_paragraphs(paragraphs: List[str]) -> List[str]:
"""
Returns the paragraphs of an article's body, annotated with HTML tags.
Args:
paragraphs (:obj:`List[str]`):
List of strings denoting paragraphs.
Returns:
:obj:`List[str]`:
List of paragraphs annotated with HTML tags.
"""
paragraphs = [_add_html_tag(paragraph, 'p') for paragraph in paragraphs if not re.findall('trends.embed.renderExploreWidget', paragraph)]
return paragraphs | a4030efd2145fb15435912a1e08354cabba209e8 | 1,511 |
from scipy.stats import gaussian_kde
def calculate_kde(
ascending: bool = True,
evaluate: bool = False,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
source_units=None,
target_units=None,
names=None,
):
"""Return the kernel density estimation (KDE) curve."""
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if len(tsd.columns) > 1:
raise ValueError(
tsutils.error_wrapper(
"""
Right now "calculate_kde" only support one time-series at a time.
You gave {}.
""".format(
tsd.columns
)
)
)
tmptsd = tsd.dropna()
ndf = tmptsd.sort_values(tmptsd.columns[0], ascending=ascending)
gkde = gaussian_kde(ndf.iloc[:, 0])
if evaluate is True:
y = gkde.evaluate(tmptsd.iloc[:, 0])
ndf = pd.DataFrame(y, index=tmptsd.index)
else:
y = gkde.evaluate(ndf.iloc[:, 0])
ndf = pd.DataFrame(y)
return ndf | d654fe75030b8c99361096650c71835aad2d6b3a | 1,512 |
def EPmulk(a, da, k):
"""
C = A * k
"""
return a * k, np.absolute(da * k) | 4fb2b7ff28db1ff13fa2aa0c68f5d0c25e9ba3d9 | 1,513 |
from urllib import parse
def decrypt_location(location):
"""Decrypts the `location` field in Xiami responses to URL."""
if not location:
return None
rows, url = int(location[:1]), location[1:]
urllen = len(url)
cols_base = urllen // rows # basic column count
rows_ex = urllen % rows # count of rows that have 1 more column
matrix = []
for r in range(rows):
length = cols_base + 1 if r < rows_ex else cols_base
matrix.append(url[:length])
url = url[length:]
url = ''
for i in range(urllen):
url += matrix[i % rows][i // rows]
return parse.unquote(url).replace('^', '0') | 2fc3062df2786550e2b4839fae4aee5668963cc1 | 1,515 |
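# Added toy example (not part of the dataset row): the leading digit is the row
# count and the remaining characters are read back column-wise, so "3adgbehcfi"
# unscrambles to "abcdefghi"; real Xiami responses are percent-encoded URLs in
# which '^' stands for '0'.
assert decrypt_location("3adgbehcfi") == "abcdefghi"
assert decrypt_location("") is None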
def sqd_yinfast(samples):
""" compute approximate sum of squared difference
Using complex convolution (fast, cost o(n*log(n)) )"""
# yin_t(tau) = (r_t(0) + r_(t+tau)(0)) - 2r_t(tau)
B = len(samples)
W = B//2
yin = np.zeros(W)
sqdiff = np.zeros(W)
kernel = np.zeros(B)
# compute r_(t+tau)(0)
squares = samples**2
for tau in range(W):
sqdiff[tau] = squares[tau:tau+W].sum()
# add r_t(0)
sqdiff += sqdiff[0]
# compute r_t(tau) using kernel convolution in complex domain
samples_fft = np.fft.fft(samples)
kernel[1:W+1] = samples[W-1::-1] # first half, reversed
kernel_fft = np.fft.fft(kernel)
r_t_tau = np.fft.ifft(samples_fft * kernel_fft).real[W:]
# compute yin_t(tau)
yin = sqdiff - 2 * r_t_tau
return yin | c97e130960336074f6b0c30590ab8a044b8d63e5 | 1,516 |
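# Added usage sketch (not part of the dataset row), assuming numpy is imported as
# np as the function body requires: feed a 440 Hz sine sampled at 44.1 kHz and
# look for the dip in the difference function near 44100 / 440 ~= 100 samples.
import numpy as np

sr, f0 = 44100, 440.0
samples = np.sin(2 * np.pi * f0 * np.arange(2048) / sr)
yin = sqd_yinfast(samples)
print(yin.shape)                    # (1024,) == half the input length
print(np.argmin(yin[50:200]) + 50)  # lag of the dip, should land near 100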
def get_colours_extend(graph_size, start_set, end_set, source, target, reachable=None):
"""
Get colours for nodes including source and target nodes.
Blue nodes are those in the source set.
Orange nodes are those in the start set, not in the source set.
Green nodes are those reachable from the source that are in target.
Red nodes are those in target that are not reachable from the source.
All other nodes are grey.
"""
# Setup the colours
c = []
if reachable is None:
reachable = end_set
for acc_val in range(graph_size):
if acc_val in start_set:
if acc_val in source:
c.append("dodgerblue")
else:
c.append("darkorange")
elif acc_val in target:
if acc_val in reachable:
c.append("g")
else:
c.append("r")
else:
c.append("gray")
return c | d366ed6c4c387d0b4de4440d34d358d5a142661a | 1,517 |
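# Added usage sketch (not part of the dataset row): five nodes where node 0 is the
# source, nodes 0-1 form the start set, nodes 3-4 are targets and only node 3 is
# reachable from the source.
colours = get_colours_extend(
    graph_size=5, start_set={0, 1}, end_set={3, 4},
    source={0}, target={3, 4}, reachable={3})
print(colours)  # ['dodgerblue', 'darkorange', 'gray', 'g', 'r']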
def suspend_circuit():
"""
Suspends the circuits for some seconds, allowing the user to exit the house without playing the song.
"""
circuit.suspend()
return render_template("suspend.html", seconds=EXIT_HOUSE_TIMER, name=get_guest_name()) | 2336207150163ecd302dda6c56758a5405152aec | 1,518 |
def get_scalar_data_from_path(udatapath, name='pressure', x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
t0=0, t1=None, inc=1, frame=None, return_xy=False, verbose=True,
slicez=None, crop=None, mode='r',
reverse_x=False, reverse_y=False, reverse_z=False):
"""
Returns a scalar data from a path of udata
... There could be a case that a scalar data such as temperature and pressure is also stored in udata.h5
... This function serves as a reader of such a quantity
If return_xy is True, it returns udata, xx(2d grid), yy(2d grid)
Parameters
----------
udatapath: str, a path to udata
name: str, name of the dataset in the udata h5
x0: int
x1: int
y0: int
y1: int
t0: int
t1: int
inc: int
time increment of data to load from udatapath, default: 1
frame: array-like or int, default: None
If an integer is given, it returns a velocity field at that instant of time
If an array or a list is given, it returns a velocity field at the given time specified by the array/list.
By default, it loads data by a specified increment "inc".
If "frame" is given, it is prioritized over the incremental loading.
    return_xy: bool, default: False
verbose: bool
If True, return the time it took to load udata to memory
Returns
-------
    pdata, and optionally xx, yy (and zz if 3D) when return_xy is True
"""
f = h5py.File(udatapath, 'r')
keys = list(f.keys())
f.close()
###
if not name in keys:
raise ValueError('%s does not exist in the given path' % name)
else:
if verbose:
tau0 = time_mod.time()
print('... reading %s from the path' % name)
if crop is not None and [x0, x1, y0, y1, z0, z1] == [0, None, 0, None, 0, None]:
x0, x1, y0, y1, z0, z1 = crop, -crop, crop, -crop, crop, -crop
if mode == 'w' or mode == 'wb':
raise ValueError('... w was passed to h5Py.File(...) which would delete the file if it exists. \n'
'Probably, this is not what you want. Pass r for read-only')
with h5py.File(udatapath, 'r') as f:
if 'z' in f.keys():
dim = 3
else:
dim = 2
if dim == 2:
if frame is None:
pdata = f[name][y0:y1, x0:x1, t0:t1:inc]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, frame]
if return_xy:
xx, yy = f['x'][y0:y1, x0:x1], f['y'][y0:y1, x0:x1]
elif dim == 3:
if frame is None and slicez is None:
pdata = f[name][y0:y1, x0:x1, z0:z1, t0:t1:inc]
elif frame is None and slicez is not None:
pdata = f[name][y0:y1, x0:x1, slicez, t0:t1:inc]
elif frame is not None and slicez is not None:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, slicez, frame]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, z0:z1, frame]
if return_xy:
if slicez is None:
xx, yy, zz = f['x'][y0:y1, x0:x1, z0:z1], f['y'][y0:y1, x0:x1, z0:z1], f['z'][y0:y1, x0:x1,
z0:z1]
else:
xx, yy, zz = f['x'][y0:y1, x0:x1, slicez], f['y'][y0:y1, x0:x1, slicez], f['z'][0, 0, slicez]
tau1 = time_mod.time()
if verbose:
print('... time took to load udata in sec: ', tau1 - tau0)
if return_xy:
if dim == 2:
if reverse_x:
pdata[...] = pdata[:, ::-1, :]
xx[...] = xx[:, ::-1]
yy[...] = yy[:, ::-1]
if reverse_y:
                pdata[...] = pdata[::-1, :, :]
xx[...] = xx[::-1, :]
yy[...] = yy[::-1, :]
return pdata, xx, yy
elif dim == 3:
if reverse_x:
pdata[...] = pdata[:, ::-1, :, :]
xx[...] = xx[:, ::-1, :]
yy[...] = yy[:, ::-1, :]
zz[...] = zz[:, ::-1, :]
if reverse_y:
pdata[...] = pdata[::-1, :, :, :]
xx[...] = xx[::-1, :, :]
yy[...] = yy[::-1, :, :]
zz[...] = zz[::-1, :, :]
if reverse_z:
pdata[...] = pdata[:, :, ::-1, :]
xx[...] = xx[:, :, ::-1]
yy[...] = yy[:, :, ::-1]
zz[...] = zz[:, :, ::-1]
return pdata, xx, yy, zz
else:
return pdata | ef99d0e3dcd8a15b5c7759dac39fb3b7fbe09632 | 1,519 |
from statistics import mean
def create_transformed_df(old_df, elem_list, features_list):
"""elem_list should be in type list"""
new_dict = {}
for index, elems in zip(old_df.index, old_df[elem_list]):
for elem in elems:
if elem in new_dict.keys():
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
else:
new_dict[elem] = [[] for i in range(len(features_list))]
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
headers = [elem_list]
for i in features_list:
headers.append(f'avg_movie_{i}')
headers.append('number_of_movies') ##? how to name?
new_df = pd.DataFrame(columns=headers)
for key in new_dict:
row = []
row.append(key)
for i, col in enumerate(headers[1:-1]):
mean_val = mean(new_dict[key][i])
row.append(mean_val)
num = len(new_dict[key][0])
row.append(num)
length = len(new_df)
new_df.loc[length] = row
return new_df | c5d825f446839d9b6d921bf064bb07c102b82905 | 1,520 |
def sem_id_semester_get(semester, obs_id):
"""
retrieves all the sem_id associated with an observer for the semester.
:param semester: semester id
:type semester: str
:param obs_id: observer id
:type obs_id: int
:rtype: List[str]
"""
semester_list = []
sem_ids = utils.get_proposal_ids(obs_id)
for semid in sem_ids:
if semester in semid:
semester_list.append(semid)
return semester_list | d15b36ccbe1e7a6d2f2cb5016419e259df922881 | 1,521 |
def getLabels (dataMatrix, classOfInterest):
"""
    Gets labels on a per class basis that will be passed to the randomForest function
Parameters
----------
dataMatrix : anndata object
The data file of interest
classOfInterest : str
The class you will split the data by in the set of dataMatrix.obs
Returns
-------
labelsDict : dict
Dictionary with labels for each class
"""
dataMatrix = filterNormalize (dataMatrix, classOfInterest)
labelsDict = {}
for label in np.unique(dataMatrix.obs[classOfInterest]):
lists = []
for obs in dataMatrix.obs[classOfInterest]:
if obs == label:
lists.append('A')
else:
lists.append('B')
labelsDict[label] = lists #this is usually in line w if and else
return labelsDict | bf7bcfc4afcd16deedbfcf27c9e1eb1a5dfa603a | 1,522 |
def load_file(file_location):
"""
Opens a given file and returns its contents.
:param str file_location: The absolute path to the file
:rtype: str
:return: The contents of the file
"""
with open(file_location, 'r') as file_contents:
contents = file_contents.read()
return contents | 61b78432cffa4c22adc9af31bbad63bf8777737b | 1,523 |
def create_bam(data, args):
"""
aligner and conversion to BAM file
"""
workdir = safe_makedir("align")
sample = data['name']
# workdir = op.join("align", sample)
data['final_bam'] = _align(data['trimmed'], sample, op.abspath(workdir),
args.index, args.is_directional, args.bowtie2,
args.reference, data['config'])
data['order_bam'] = data['final_bam']
return data | 81e77af7317f29277d42a37e46f0e5aa719cab3c | 1,524 |
def calculateStorageLocationsDistance(D_loc: pd.DataFrame, input_loccodex: float,
input_loccodey: float, output_loccodex: float,
output_loccodey: float) -> pd.DataFrame:
"""
calculate the sum of the rectangular distances from
Input point -> physical location -> Output point
Args:
D_loc (pd.DataFrame): Input location DataFrame.
input_loccodex (float): Input X coordinate.
input_loccodey (float): Input Y coordinate.
output_loccodex (float): Output X coordinate.
output_loccodey (float): Output Y coordinate.
Returns:
D_loc (TYPE): DESCRIPTION.
"""
D_loc = D_loc.dropna(subset=['LOCCODEX', 'LOCCODEY'])
D_loc['INPUT_DISTANCE'] = np.abs(input_loccodex - D_loc['LOCCODEX']) + np.abs(input_loccodey - D_loc['LOCCODEY'])
D_loc['OUTPUT_DISTANCE'] = np.abs(output_loccodex - D_loc['LOCCODEX']) + np.abs(output_loccodey - D_loc['LOCCODEY'])
return D_loc | 3432036119007cb1f33f69106cae8c2cf28d697b | 1,525 |
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words) | 2b6a293bc5faba31428f66f214e1991dd9878027 | 1,526 |
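# Added usage sketch (not part of the dataset row): join is a thin wrapper over str.join.
assert join(["a", "b", "c"]) == "a b c"
assert join(["a", "b", "c"], "-") == "a-b-c"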
import codecs
import pickle as pkl
def pickle(obj):
""" Creates a serialization of the provided object
Serialization is done by :mod:`pickle` module. If :mod:`cPickle` package is
available, that package will be used instead, yielding a gain in speed.
Parameters
----------
obj: :obj:`obj`
Object to be serialized.
Returns
-------
pickle: :obj:`pickle.pickle`
Serialized version of the provided object. """
return codecs.encode(pkl.dumps(obj), "base64").decode() | 3a36e7d3c1f0fd31a417df21701eb150e3c611a8 | 1,527 |
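# Added round-trip sketch (not part of the dataset row): the decoder is not in the
# snippet, but an inverse that mirrors the encoder above is simply base64 -> bytes
# -> pkl.loads. `unpickle` is a hypothetical helper named here for illustration.
def unpickle(serialized: str):
    """Inverse of pickle(): decode the base64 text and unpickle the bytes."""
    return pkl.loads(codecs.decode(serialized.encode(), "base64"))

payload = {"answer": 42}
assert unpickle(pickle(payload)) == payload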
def calc_E_E_AP_d_t(n_p):
"""1 時間当たりの家電の消費電力量
Args:
n_p(float): 仮想居住人数 仮想居住人数
Returns:
ndarray: 1 時間当たりの家電の消費電力量
"""
schedule = load_schedule()
schedule_app = get_schedule_app(schedule)
if 1 <= n_p and n_p <= 2:
E_E_AP_1_d_t = get_E_E_AP_p_d_t(1, schedule_app)
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
return E_E_AP_1_d_t * (2 - n_p) / (2 - 1) + E_E_AP_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
return E_E_AP_2_d_t * (3 - n_p) / (3 - 2) + E_E_AP_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
E_E_AP_4_d_t = get_E_E_AP_p_d_t(4, schedule_app)
return E_E_AP_3_d_t * (4 - n_p) / (4 - 3) + E_E_AP_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p) | 645052eaedf7cc93d4b171f710d0a29e119fe7cf | 1,528 |
from typing import List
import torch
def Squeeze_forward(op: Operation, values: List[torch.Tensor], ctx: TorchBackendContext = None, **kwargs) -> torch.Tensor:
"""
Remove single-dimensional entries from the shape of a tensor.
Takes an input axes with a list of axes to squeeze.
If axes is not provided, all the single dimensions will be removed from the shape.
If an axis is selected with shape entry not equal to one, an error is raised.
Inputs (1 - 2)
data (differentiable) : T
Tensors with at least max(dims) dimensions.
axes (optional, non-differentiable) : tensor(int64)
List of integers indicating the dimensions to squeeze.
Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).
Outputs
squeezed (differentiable) : T
Reshaped tensor with same data as input.
Args:
op (Operation): [description]
input_values (List[torch.Tensor]): [description]
Returns:
torch.Tensor: [description]
"""
ASSERT_ALL_TENSORS_AT_SAME_DEVICE(op=op, values=values)
ASSERT_NUM_OF_INPUT(op=op, values=values, min_num_of_input=1, max_num_of_input=2)
[squeezing_tensor], axes = values, GET_ATTRIBUTE_FROM_OPERATION(op=op, attribute='axes', compulsive=True)
if isinstance(axes, list):
for squeezing_dim in sorted(axes, reverse=True):
squeezing_tensor = torch.squeeze(squeezing_tensor, squeezing_dim)
elif isinstance(axes, int):
squeezing_tensor = torch.squeeze(squeezing_tensor, axes)
else: raise TypeError(f'Parameter axes of operation {op.name} misunderstood, '
f'expect int value of list of int, while {type(axes)} was given.')
return squeezing_tensor | f20c5565aafde993e011efc4e037d6a253a79d30 | 1,529 |
import functools
def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds | 6918db594b74d75d5fbbebf70b0f2811366c20b5 | 1,531 |
def _SetRunOptionInRequest(run_option, run_schedule, request, messages):
"""Returns request with the run option set."""
if run_option == 'manual':
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.adHocRun',
messages.GoogleCloudDatacatalogV1alpha3AdhocRun())
elif run_option == 'scheduled':
scheduled_run_option = arg_utils.ChoiceToEnum(
run_schedule,
(messages.GoogleCloudDatacatalogV1alpha3ScheduledRun
.ScheduledRunOptionValueValuesEnum))
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.scheduledRun.scheduledRunOption',
scheduled_run_option)
return request | 9f93aaa6b9ec3ba9350c10b914439b16ec7c19a9 | 1,532 |
from unittest.mock import patch
def test_rank_closest():
"""test if phoneme-inventory is ranked correctly
    according to feature vector distance to a given phoneme"""
# set up custom class, create instance of it
class EtymMonkeyrank_closest:
def __init__(self):
self.phoneme_inventory, self.dm_called_with = None, []
self.dm_return = iter([1, 0, 2])
def distance_measure(self, *args):
arglist = [*args]
self.dm_called_with.append(arglist)
return next(self.dm_return)
mocketym = EtymMonkeyrank_closest()
# assert exception and exception message
with raises(InventoryMissingError) as inventorymissingerror_mock:
Etym.rank_closest(
self=mocketym,
ph="d",
howmany=float("inf"),
inv=None)
assert str(inventorymissingerror_mock.value
) == "define phoneme inventory or forms.csv"
# set up2: mock pick_minmax
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a", "c"]
# assert
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"]) == "b, a, c"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with(
[('a', 1), ('b', 0), ('c', 2)], float("inf"))
# set up3: overwrite mock class instance, mock pick_minmax anew
mocketym = EtymMonkeyrank_closest()
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a"]
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"], howmany=2) == "b, a"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 2)
# set up4: check if phoneme inventory can be accessed through self
mocketym = EtymMonkeyrank_closest()
mocketym.phoneme_inventory = ["a", "b", "c"]
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = "b"
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym,
ph="d",
inv=None,
howmany=1) == "b"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 1)
# tear down
del mocketym, EtymMonkeyrank_closest | 6ad838f0961fb311ce68402b87f68960a1ce816f | 1,533 |
import datetime
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was created, false otherwise
"""
name = module.params.get('name')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if not cloud_service_name_available.result:
changed = False
else:
changed = True
# Create cloud service if necessary
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e))
# Create linux configuration
disable_ssh_password_authentication = not password
linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
linux_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=linux_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) | 88006dec9f8e00307f4862e2cdab203867f15558 | 1,534 |
def calcCumulOverlap(modes1, modes2, array=False):
"""Returns cumulative overlap of modes in *modes2* with those in *modes1*.
    Returns a number if *modes1* contains a single :class:`.Mode` or a
:class:`.Vector` instance. If *modes1* contains multiple modes, returns an
array. Elements of the array correspond to cumulative overlaps for modes
in *modes1* with those in *modes2*. If *array* is **True**, returns an array
of cumulative overlaps. Returned array has the shape ``(len(modes1),
len(modes2))``. Each row corresponds to cumulative overlaps calculated for
modes in *modes1* with those in *modes2*. Each value in a row corresponds
to cumulative overlap calculated using upto that many number of modes from
*modes2*."""
overlap = calcOverlap(modes1, modes2)
if array:
return np.sqrt(np.power(overlap, 2).sum(axis=overlap.ndim-1))
else:
return np.sqrt(np.power(overlap, 2).cumsum(axis=overlap.ndim-1)) | 6ce8c85b778ca06e1f26f9d66151656b30a4837a | 1,535 |
import multiprocessing
from tqdm import tqdm
def apply_ntimes(func, n, args, verbose=True, timeout=None):
"""
Applies `n` times the function `func` on `args` (useful if, eg, `func` is partly random).
Parameters
----------
func : function
func must be pickable, see https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled .
n : int
args : any
timeout : int or float
If given, the computation is cancelled if it hasn't returned a result before `timeout` seconds.
Returns
-------
    list
        List of the `n` results of func(*args).
"""
pool = multiprocessing.Pool()
multiple_results = [pool.apply_async(func, args) for _ in range(n)]
pool.close()
return [res.get(timeout) for res in tqdm(multiple_results, desc='# castor.parallel.apply_ntimes', disable = True)] | 91aca94c49b7cf74ceaf5f093f21853bbd310df1 | 1,536 |
def travel_time_without_Rebalancing(tnet, i, j, exo=0):
"""
    evaluate the travel time function for edge i->j
Parameters
----------
tnet: transportation network object
i: starting node of edge
j: ending node of edge
Returns
-------
float
"""
return sum(
[tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flowNoRebalancing'] +exo )/ tnet.G_supergraph[i][j]['capacity']) ** n for n in range(len(tnet.fcoeffs))]) | 00ae58356d1a808d34a559267134cb52fc8b0dc5 | 1,537 |
def twistless(*args):
"""
Wraps the entry point function, this function should setup and run a
twisted reactor.
A twisted task will be created to constantly schedule other stackless
tasklets as often as the timesched argument.
"""
def _twistless(func):
"""
Wrap the given function
"""
@wraps(func)
def wrapped(*args, **kwargs):
"""
Calls the wrapped function in a stackless tasklet and sets up a
looping twisted task to pump the schedueler.
"""
@wraps(func)
def execute():
"""
Execute the entry point and create a looping call.
"""
reactor_tasklet = sl.getcurrent()
task.LoopingCall(sl.schedule).start(timesched)
func(*args, **kwargs)
sl.tasklet(execute)()
sl.run()
return wrapped
    # Add the timesched arg if it is not given.
if len(args) == 1 and callable(args[0]):
timesched = DEFAULT_TIMESCHED
return _twistless(args[0])
else:
timesched = args[0] if len(args) >= 1 else DEFAULT_TIMESCHED
return _twistless | 75f51549bde9e07316e9dcb31c95bdf81a3cd793 | 1,538 |
import numpy
import math
def enhance_with_function(images, labels, ratio, enhance_func):
"""
:param images:
:param labels:
    :param ratio: the ratio relative to the largest class. For example, if the highest sample count is 1000 and ratio is 3, the result
    will be around 1000 * 3 * how_many_classes samples
:param enhance_func the func used for enhance f(image, label, how_many_to_generate)
    :return: newly generated features and labels
"""
inputs_per_class = numpy.bincount(labels)
max_inputs = numpy.max(inputs_per_class)
# One Class
for i in range(len(inputs_per_class)):
input_ratio = math.ceil((max_inputs * ratio - inputs_per_class[i]) / inputs_per_class[i])
print("generating class:{} with ratio:{}, max input:{}, current:{}".format(
i, input_ratio, max_inputs, inputs_per_class[i]))
if input_ratio <= 1:
continue
new_features = []
new_labels = []
mask = numpy.where(labels == i)
for feature in images[mask]:
generated_images = enhance_func(feature, input_ratio)
for generated_image in generated_images:
new_features.append(generated_image)
new_labels.append(i)
images = numpy.append(images, new_features, axis=0)
labels = numpy.append(labels, new_labels, axis=0)
return images, labels | d16b7d3726902653bce94c11dba808da1ee88d09 | 1,539 |
async def port_create(
request: Request,
server_id: int,
port: PortCreate,
db=Depends(get_db),
user=Depends(get_current_active_admin),
):
"""
Create a new port on server
"""
db_port = create_port(db, server_id, port)
trigger_tc(db_port)
return db_port | 28e747b9af9ed04de911b1fc30653539e9e108cb | 1,540 |
def rectangle_area(base, height):
"""Returns the area of a rectangle"""
base = float(base)
height = float(height)
if (base < 0.0 or height < 0.0):
raise ValueError('Negative numbers are not allowed')
return base * height | 6dc1ea897cdeba1eb84813cefdab659abf5197ea | 1,542 |
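# Added usage sketch (not part of the dataset row): inputs are coerced to float and
# negative values raise ValueError.
assert rectangle_area("2", 3.5) == 7.0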
def pipe(*args, **kwargs):
"""A processor that replaces the text of a field of an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with replaced content
Examples:
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
>>> rules = [
... {'find': 'Gr', 'replace': 'M'},
... {'find': 'e', 'replace': 'a', 'param': 'last'}]
>>> conf = {'rule': rules}
>>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
>>> item = {'title': 'Greetings'}
>>> next(pipe(item, **kwargs))['result'] == 'Meatings'
True
"""
return parser(*args, **kwargs) | 29be8fad7df2eb674633abd160b818ed4d6697b2 | 1,543 |
def adjoint(g):
"""Return the adjoint of a rigid body transformation g."""
adg = np.zeros((6, 6))
R_part, p = g[:3, :3], g[:3, 3]
pR = skew(p) @ R_part
adg[:3, :3] = R_part
adg[-3:, -3:] = R_part
adg[:3, -3:] = pR
return adg | 6ef82620aa6db984956c7a858ebf0e8715e1e9df | 1,544 |
def dmp_rr_yun0_sqf_list(f, u, K):
"""Compute square-free decomposition of ``f`` in zero-characteristic ring ``K``.
References
==========
* :cite:`LeeM2013factor`, page 8
"""
if dmp_ground_p(f, None, u):
return []
result, count = [], 1
qs = [dmp_diff_in(f, 1, i, u, K) for i in range(u + 1)]
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
while not dmp_one_p(f, u, K):
for i in range(u + 1):
qs[i] = dmp_quo(qs[i], g, u, K)
f = dmp_quo(f, g, u, K)
for i in range(u + 1):
qs[i] = dmp_sub(qs[i], dmp_diff_in(f, 1, i, u, K), u, K)
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
if not dmp_one_p(g, u, K):
result.append((g, count))
count += 1
return result | cf917fb0f0cfd505328c07a09fe07cafd8872d7e | 1,545 |
def angle2trig(theta):
"""Convert angle to a reportlab ready tuple.
Arguments:
- theta - Angle in degrees, counter clockwise from horizontal
Returns a representation of the passed angle in a format suitable
for ReportLab rotations (i.e. cos(theta), sin(theta), -sin(theta),
cos(theta) tuple)
"""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
return (c, s, -s, c) | b4ad079b5b9fb889b26eec37c1d14ae97a34be50 | 1,548 |
def get_state_z0_pure_state_vector() -> np.ndarray:
"""Returns the pure state vector for :math:`|0\\rangle`.
Returns
-------
np.ndarray
the pure state vector.
"""
vec = np.array([1, 0], dtype=np.complex128)
return vec | 53a7485572ea8fed8fcb8155923692050092c881 | 1,549 |
def HSV_to_CMYKratio(hsv):
"""Converts HSV color space to CMYK (ratio representation)"""
rgb = HSV_to_RGB(hsv)
return RGB_to_CMYKratio(rgb) | c6268c86dc425d7f5b386fd9dbb56e5299d9573b | 1,550 |
def delete_single_culture(user_id, culture_id):
"""Delete a culture."""
try:
culture = Culture.query.filter_by(user_id=user_id).filter_by(culture_id=culture_id).first()
if not culture:
response_object = {
'status': 'fail',
'message': f'{culture_id} does not exist.'
}
return jsonify(response_object), 404
else:
db.session.delete(culture)
db.session.commit()
response_object = {
'status': 'success',
'message': f'{culture_id} was deleted.'
}
return jsonify(response_object), 200
except exc.IntegrityError as e:
db.session.rollback()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
return jsonify(response_object), 400 | e96ab6e653b2d191e1c0977ee9dace114c6056ce | 1,551 |
def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True):
""" Creates single atomic chunk
:param im: IngestionManager
:param chunk_coord: np.ndarray
array of three ints
:param aff_dtype: np.dtype
np.float64 or np.float32
:param verbose: bool
:return:
"""
    chunk_coord = np.array(list(chunk_coord), dtype=int)
edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype)
mapping = collect_agglomeration_data(im, chunk_coord)
active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping)
edge_ids = {}
edge_affs = {}
edge_areas = {}
for k in edge_dict.keys():
if k == "cross":
edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None],
edge_dict[k]["sv2"][:, None]],
axis=1)
continue
sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]]
sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]]
aff_conn = edge_dict[k]["aff"][active_edge_dict[k]]
area_conn = edge_dict[k]["area"][active_edge_dict[k]]
edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None],
sv2_conn[:, None]],
axis=1)
edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32)
edge_areas[f"{k}_connected"] = area_conn
sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]]
sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]]
aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]]
area_disconn = edge_dict[k]["area"][~active_edge_dict[k]]
edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None],
sv2_disconn[:, None]],
axis=1)
edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32)
edge_areas[f"{k}_disconnected"] = area_disconn
im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas,
isolated_node_ids=isolated_ids)
return edge_ids, edge_affs, edge_areas | 6096e22b35a800782f394a45b6307aec23c71d57 | 1,552 |
def add_adult(request):
"""
Add a new adult record
:param request:
:return:
"""
args = dict()
app = AppUtil.get_by_user(user=request.user)
if request.method == 'POST':
form = AddAdultForm(request.POST)
if form.is_valid():
adult = form.save(commit=False)
adult.application = app[0]
adult.save()
return redirect('adult_salary', adult_id=adult.id)
else:
form = AddAdultForm()
args['form'] = form
args['nav'] = AppUtil.get_nav(nav=nav, url='adults', app=app[0])
args['progress'] = AppUtil.get_app_progress(app=app[0])
return render(request, "eat/user/application/adult/add_edit.html", args) | 8998601a05acd875fb65008fb85bbcdac7ad418d | 1,553 |
import re
def get_layers(model, filter_regexp):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
down_layers\\.[123456]\\.(conv[12]|identity\\.conv))
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
# remove .weight in all other names (or .weight_orig is spectral norm)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers)) | d34da2bd7bfcf9827846f4aafc74d8c94ceb0d31 | 1,555 |
from typing import Union
def decrypt(data: bytes,
password: Union[str, bytes]) -> bytes:
"""
decrypt data
:param data: encrypted data
:param password: password
:return: plain data
"""
__data = gzip_decompress(data[4:]) if data.startswith(b'moca') else data
iv, cipher = __data[:AES.block_size], __data[AES.block_size:]
return __create_aes(password, iv).decrypt(cipher) | c6228e10c1498e734a42039071aa6d88356eef84 | 1,556 |
def stream_from_url(*args, **kwargs):
"""
Save the resource as a file on disk iteratively by first asking
for the 'content-length' header entry and downloading in chunks.
By default we will retry if an HTTP error arises.
By default we will uncompress a downloaded file if it is zipped.
"""
# Just redirect to download_from_url #
    kwargs.update({'stream': True})
return download_from_url(*args, **kwargs) | 2ee598ac7cb19a1f884ad7faad4cec38a5f93c32 | 1,557 |
def modulo_3(lhs, ctx):
"""Element ǒ
(num) -> a % 3
(str) -> a split into chunks of size 2
"""
return {
(NUMBER_TYPE): lambda: lhs % 3,
(str): lambda: [lhs[i : i + 2] for i in range(0, len(lhs), 2)],
}.get(vy_type(lhs), lambda: vectorise(modulo_3, lhs, ctx=ctx))() | daa2775727af48d76076e54095a2503243368dc1 | 1,558 |
def geolocalizarCiudades(lista_ciudades: list):
"""Para una lista con nombres de ciudades devuelve una fila de DataFrame.
Parámetros
----------
lista_ciudades : list
Lista de nombres de ciudades.
Devuelve
-------
df_Fila: pandas.DataFrame
Fila de un DataFrame que incluye el nombre de la ciudad, el par de coordenadas, la dirección completa de la ciudad y una instancia de la clase Ciudad.
"""
rows = []
for i in lista_ciudades:
coord, direccion = geolocalizar(i)
rows.append([i, coord, direccion, Ciudad(*coord, i)])
df_Fila = pd.DataFrame(
rows,
columns=[
"Ciudad",
"Coordenadas",
"Direccion",
"ObjetoCiudad"])
return df_Fila | 14d26dba3a2fcef1334e7d13e60b01ff3d3f9ef5 | 1,560 |
def HandleConvPaddingModes(x, padding, kernel_shape, strides):
"""Returns an updated tensor and padding type for REFLECT and SYMMETRIC.
Args:
x: A 4D tensor with shape [batch_size, height, width, depth].
padding: Padding mode (SAME, VALID, REFLECT, or SYMMETRIC).
kernel_shape: Shape of convolution kernel that will be applied.
strides: Convolution stride that will be used.
Returns:
x and padding after adjustments for REFLECT and SYMMETRIC.
"""
# For 1x1 convolution, all padding modes are the same.
if np.all(kernel_shape[:2] == 1):
return x, 'VALID'
if padding == 'REFLECT' or padding == 'SYMMETRIC':
# We manually compute the number of paddings as if 'SAME'.
# From Tensorflow kernel, the formulas are as follows.
# output_shape = ceil(input_shape / strides)
# paddings = (output_shape - 1) * strides + filter_size - input_shape
# Let x, y, s be a shorthand notations for input_shape, output_shape, and
# strides, respectively. Let (x - 1) = sn + r where 0 <= r < s. Note that
# y - 1 = ceil(x / s) - 1 = floor((x - 1) / s) = n
# provided that x > 0. Therefore
# paddings = n * s + filter_size - (sn + r + 1)
# = filter_size - r - 1.
input_shape = x.get_shape() # shape at graph construction time
img_shape = tf.shape(x)[1:3] # image shape (no batch) at run time
remainder = tf.mod(img_shape - 1, strides[1:3])
pad_sizes = kernel_shape[:2] - remainder - 1
pad_rows = pad_sizes[0]
pad_cols = pad_sizes[1]
pad = tf.stack([[0, 0], tf.stack([pad_rows // 2, (pad_rows + 1) // 2]),
tf.stack([pad_cols // 2, (pad_cols + 1) // 2]), [0, 0]])
# Manually pad the input and switch the padding mode to 'VALID'.
x = tf.pad(x, pad, mode=padding)
x.set_shape([input_shape[0], x.get_shape()[1],
x.get_shape()[2], input_shape[3]])
padding = 'VALID'
return x, padding | def8d35429e568096dbb5410723c1cf550890707 | 1,561 |
import uuid
def uuid1_():
"""用于生成GUID"""
return str(uuid.uuid1()) | 8b1bf00c2c76429499a4300cc7f75fd075a0bf1c | 1,562 |
def default_if_none(default):
"""Implements the rule: default if v is None else v"""
return default_if_true(lambda v: v is None, default) | 13cf841c09e14074c38a7ae2b5fac649518e783d | 1,563 |
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload Unifi Protect config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in METEOBRIDGE_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok | 8ef56a9029adb33853b90a9a9ba8e35e67a2d79a | 1,565 |
def auc(y, z, round=True):
"""Compute area under the ROC curve."""
if round:
y = y.round()
if len(y) == 0 or len(np.unique(y)) < 2:
return np.nan
return skm.roc_auc_score(y, z) | 895e8f37829903ee7e79012a54ecc318401ae4c6 | 1,567 |
def upperLeftOrigin( largeSize, smallSize ):
"""
The upper left coordinate (tuple) of a small rectangle in a larger rectangle (centered)
"""
origin = tuple( map( lambda x: int( ( (x[0]-x[1])/2 ) ), zip( largeSize, smallSize )) )
return origin | bda31fc5eb021f40a62b00949ced940ef171005f | 1,569 |
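# Added usage sketch (not part of the dataset row): centre a 20x40 rectangle inside
# a 100x100 one.
assert upperLeftOrigin((100, 100), (20, 40)) == (40, 30)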
from re import S
import typing
import importlib
def from_ext(ext: str) -> S:
"""Get a SignedObject by file extension."""
object_types: typing.List[S] = [RpkiGhostbusters,
RpkiManifest,
RouteOriginAttestation]
entry_point_name = "rpkimancer.sigobj"
entry_points = importlib.metadata.entry_points()
for entry_point in entry_points.get(entry_point_name, []):
log.info(f"trying to load signed object {entry_point.value}")
cls = entry_point.load()
if issubclass(cls, SignedObject):
object_types.append(typing.cast(S, cls))
else:
log.warning(f"signed objects must inherit from {SignedObject}")
lookup_map = {cls.econtent_type.file_ext: cls
for cls in object_types}
try:
return lookup_map[ext]
except KeyError:
return lookup_map[ext.lstrip(".")] | 5edeb91022b2d97239038e99d565a6879532eeb0 | 1,571 |
import matplotlib.pyplot as plt
def plot_audio(audio,time,ResultPath,title):
"""Plot and save an audio file amplitude over time"""
plt.figure()
plt.plot(time,audio, linewidth=0.01)
plt.ylabel("Amplitude")
plt.xlabel("Time (s)")
plt.title(title)
pathname=ResultPath + title
plt.savefig(pathname)
plt.show()
return() | faf8e6c38e65d6a1caebfdfd0335a92ed570d2b3 | 1,572 |
import numpy as np
def dataSet():
    """
    Test data set.
    """
x = [np.array([[1], [2], [3]]),
np.array([[2], [3], [4]])]
d = np.array([[1], [2]])
return x, d | 91b0dfb28ec81a4ca392aafd0c06f81319d5db38 | 1,573 |
def config():
"""
Get the OpenAPI Document configuration
:returns: OpenAPI configuration YAML dict
"""
with open(get_test_file_path('pygeoapi-test-openapi-config.yml')) as config_file: # noqa
return yaml_load(config_file) | 23519be12e1f6d9d79210de325a726df16946507 | 1,574 |
def convert_broadcast_lesser(node, **kwargs):
"""Map MXNet's broadcast_lesser operator attributes to onnx's Less operator
and return the created node.
"""
return create_basic_op_node('Less', node, kwargs) | 2ef5223ad38b24791d530c0c609859160b9a4c70 | 1,575 |
import numpy as np
def histogram2d(x, y, bins_x, bins_y):
"""Histogram 2d between two continuous row vectors.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
y : array_like
Vector array of shape (N,) and of type np.float32
bins_x, bins_y : int64
Number of bins respectively for the x and y variables
Returns
-------
hist : array_like
Array of shape (bins, bins) and of type int64
"""
# x-range
x_max, x_min = x.max(), x.min()
delta_x = 1 / ((x_max - x_min) / bins_x)
# y-range
y_max, y_min = y.max(), y.min()
delta_y = 1 / ((y_max - y_min) / bins_y)
# compute histogram 2d
xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)
for t in range(len(x)):
i = (x[t] - x_min) * delta_x
j = (y[t] - y_min) * delta_y
if 0 <= i < bins_x and 0 <= j < bins_y:
xy_bin[int(i), int(j)] += 1
return xy_bin | 1d7f88eb0ab25092a826a8f1157895e02608aaba | 1,577 |
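A hypothetical usage sketch that bins two correlated float32 vectors into a 10x10 grid:
rng = np.random.RandomState(0)
x = rng.rand(1000).astype(np.float32)
y = (x + 0.1 * rng.rand(1000)).astype(np.float32)
hist = histogram2d(x, y, 10, 10)
print(hist.shape, hist.sum())  # (10, 10); total is slightly under 1000 because max-valued samples fall outside the last bin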
from typing import Any
from typing import Tuple
from osgeo import osr
def xy2latlong(x: float, y: float, ds: Any) -> Tuple[float, float]:
"""Return lat long coordinate by x, y
>>> import gdal
>>> path = "../../../tests/data/raster_for_test.tif"
>>> ds = gdal.Open(path)
>>> xy2latlong(3715171, 2909857, ds)
(1.7036231518576481, 48.994284431891565)
"""
old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(ds.GetProjectionRef())
# create the new coordinate system
wgs84_wkt = """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]"""
new_cs = osr.SpatialReference()
new_cs.ImportFromWkt(wgs84_wkt)
# create a transform object to convert between coordinate systems
transform = osr.CoordinateTransformation(old_cs, new_cs)
# get the coordinates in lat long
latlong = transform.TransformPoint(x, y)
return latlong[0], latlong[1] | 01f3a1e2d5c8e842db6668488b0a3d9d9b432295 | 1,578 |
import datetime as dt
def relative_date(r='12m', end_date='today', date_format='%Y-%m-%d',
                  as_string=False, unixtimestamp=False):
"""
Relative Date function
Calculates a datetime from a given end date and a relative reference.
INPUT:
r - relative date reference as '-12d' accepts d, w, m or y
end_date - 'today' (default), date string, datetime object
date_format - input format of string & output if requested
as_string - True | False (default)
decides if output is converted to string from datetime
unixtimestamp - converts datetime to an INTEGER unixtimestamp
"""
# Create Datetime object end_date based on supplied end_date
# If not string or 'today' assume already in datetime format
if end_date == 'today':
end_date = dt.datetime.today()
elif isinstance(end_date, str):
end_date = dt.datetime.strptime(end_date, date_format)
# Breakdown Relative Reference into type (i.e. d, w, m, y) & number
r = r[1::] if r[0] == '-' else r
dtype, dnum = str(r[-1]).lower(), float(r[0:-1])
# Manipulate based on relative Days, Weeks, Months or Years
if dtype == 'd': start_date = end_date - dt.timedelta(days=dnum)
elif dtype == 'w': start_date = end_date - dt.timedelta(weeks=dnum)
elif dtype == 'm': start_date = end_date - dt.timedelta(weeks=dnum*4)
elif dtype == 'y': start_date = end_date - dt.timedelta(weeks=dnum*52.143)
# Output as Strings if desirable
if as_string is True:
start_date = dt.datetime.strftime(start_date, date_format)
end_date = dt.datetime.strftime(end_date, date_format)
elif unixtimestamp is True:
start_date = int(dt.datetime.timestamp(start_date))
end_date = int(dt.datetime.timestamp(end_date))
return start_date, end_date | 720f24ce1fafa2b77979c924a9c20b1d6cc86c03 | 1,579 |
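A hypothetical usage sketch with a fixed end date (note that a "month" is approximated as four weeks):
start, end = relative_date('6m', end_date='2020-06-30', as_string=True)
print(start, end)  # 2020-01-14 2020-06-30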
import numpy as np
T, F = True, False  # boolean shorthands used by the original codebase
def get_iexist_vdw_bond(ipt):
    """
    Check if a given mol pair contains any vdW bond that exists
    in the query mol. Note that input mol pairs must have cc=0.
"""
obj, mi, mj = ipt
iok = F
if np.any( [ set(b) <= set(mi.iasq+mj.iasq) for b in obj.ncbs ] ):
iok = T
return iok | 81af4c03ea988412cb11be3f962e40239cfbadcf | 1,580 |
import pandas as pd
def load_data(messages_filepath, categories_filepath):
"""Loads messages and categories data and creates a merged dataframe
Args:
messages_filepath (str): Path to the messages file
categories_filepath (str): Path to the categories file
Returns:
(pd.DataFrame): A messages and categories dataframe
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
return messages.merge(categories, on='id') | 1f7308c2f51b587b3b27c35f680225c0c78c85b0 | 1,582 |
def is_square_inside(row, col, rows, cols):
"""Check if row and col is square inside grid having rows and cols."""
return row not in (0, rows - 1) and col not in (0, cols - 1) | f0cdcbc6d9bee6a41fd0cc84b16ffaf0638a522c | 1,583 |
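Two worked calls on a 4x5 grid:
print(is_square_inside(2, 3, 4, 5))  # True  -- row 2, col 3 is interior
print(is_square_inside(0, 3, 4, 5))  # False -- row 0 lies on the border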
from torch import FloatTensor
def reshapeLabel(label):
"""
Reshape 1-D [0,1,...] to 2-D [[1,-1],[-1,1],...].
"""
n = label.size(0)
y = FloatTensor(n, 2)
y[:, 0] = 2 * (0.5 - label)
y[:, 1] = - y[:, 0]
return y.long() | 77716413deb3263b23a6ca8e684274fa67855375 | 1,584 |
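A small usage sketch showing the 0/1 to [1, -1] / [-1, 1] mapping:
import torch
print(reshapeLabel(torch.tensor([0., 1., 1.])))  # tensor([[ 1, -1], [-1,  1], [-1,  1]])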
import numpy as np
import torch
def _coo_scipy2torch(adj, coalesce=True, use_cuda=False):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
ans = torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
if use_cuda:
ans = ans.cuda()
if coalesce:
ans = ans.coalesce()
return ans | 27d9db560dc60ec31ec7f152952db201c4e6aafb | 1,585 |
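A hypothetical usage sketch that converts a tiny SciPy COO matrix:
import scipy.sparse as sp
adj = sp.coo_matrix(([1.0, 2.0], ([0, 1], [1, 0])), shape=(2, 2))
print(_coo_scipy2torch(adj).to_dense())  # tensor([[0., 1.], [2., 0.]])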
def do_add_application_type(request):
"""定义
dict_class=models.CharField(u"字典类别",max_length=255)
dict_type=models.CharField(u"字典类型",max_length=255)
dict_name=models.CharField(u"字典名称",max_length=255)
dict_value=models.CharField(u"字典值",max_length=255)
dict_status=models.IntegerField(u"字典状态")
dict_mark=models.CharField(u"字典备注",max_length=1000,null=True,blank=True)
"""
dict_class=request.POST.get("dict_class")
dict_type=request.POST.get("dict_type")
dict_name=request.POST.get("dict_name")
dict_code=request.POST.get("dict_code")
dict_status=0
dict_mark=request.POST.get("dict_mark")
try:
dicts = Dicts.objects.filter(dict_class=dict_class,dict_type=dict_type,dict_name=dict_name,dict_code=dict_code)
if dicts.exists():
            return render_json({'code':True, 'msg':u"an identical record already exists"})
Dicts.objects.create(dict_class=dict_class,dict_type=dict_type
,dict_name=dict_name,dict_code=dict_code
,dict_status=dict_status,dict_mark=dict_mark)
logger.info('insert object to Dicts is success')
        return render_json({'code':True, 'msg':u"data saved successfully"})
    except Exception as e:
        logger.error('insert object to Dicts is error:{}'.format(repr(e)))
        return render_json({'code':False, 'msg':u"data save failed"}) | 713a387215132ddb435592cd834537346cfcf024 | 1,588
import numpy as np
def exponential_decay_function(distance: np.ndarray) -> np.ndarray:
"""Calculate exponential discount factor for action interaction weight matrix.
Parameters
-----------
distance: array-like, shape (len_list, )
Distance between two slots.
"""
if not isinstance(distance, np.ndarray) or distance.ndim != 1:
raise ValueError("distance must be 1-dimensional ndarray")
return np.exp(-distance) | ac434d098274e5119418a2c18641dadcd1ca8dca | 1,589 |
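A quick worked example of the decay values:
print(exponential_decay_function(np.arange(3, dtype=float)))  # approximately [1. 0.3679 0.1353]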
def line_length(line, ellipsoid='WGS-84',shipping=True):
"""Length of a line in meters, given in geographic coordinates
Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285
Arguments:
line {Shapely LineString} -- a shapely LineString object with WGS-84 coordinates
ellipsoid {String} -- string name of an ellipsoid that `geopy` understands (see
http://geopy.readthedocs.io/en/latest/#module-geopy.distance)
Returns:
Length of line in meters
"""
if shipping == True:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
)
else:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(a, b, ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
) | bb80b01729f589c0645606581f4a1fc53836e037 | 1,590 |
import numpy as np
def corr2_coeff(x, y):
"""A magic function for computing correlation between matrices and arrays.
This code is 640x+ faster on large dataset compared to np.corrcoef().
------------------------------------------------------------------
author: Divakar (https://stackoverflow.com/users/3293881/divakar)
url: https://stackoverflow.com/questions/42677677
------------------------------------------------------------------
"""
# input arrays subtract row-wise mean
x_sub_mx = x - x.mean(1)[:, None]
y_sub_my = y - y.mean(1)[:, None]
# sum of squares across rows
ssx = (x_sub_mx ** 2).sum(1)
ssy = (y_sub_my ** 2).sum(1)
return np.dot(x_sub_mx, y_sub_my.T) / np.sqrt(np.dot(ssx[:, None], ssy[None])) | 5834294b9a67efdeecfde4546a805d1d136b8796 | 1,591 |
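A hypothetical sanity check: when both inputs are the same matrix, the result should match `np.corrcoef`.
rng = np.random.RandomState(0)
x = rng.rand(5, 100)
print(np.allclose(corr2_coeff(x, x), np.corrcoef(x)))  # True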
from typing import Optional
def get_database_url(track: str) -> Optional[URL]:
"""
Get the database URL based on the environment
How the database URL is selected:
1. If a predefined URL for the track is set, use that
2. If no predefined URL is set, generate one based on the preferred database type
"""
database_default_port_mapping = {MYSQL: 3306, POSTGRES: 5432}
uppercase_track = track.upper()
track_database_url = env.str(f"K8S_{uppercase_track}_DATABASE_URL", "")
if track_database_url:
return make_url(track_database_url)
database_type = get_database_type()
if not database_type:
return None
deploy_name = get_deploy_name(track)
database_port = database_default_port_mapping[database_type]
database_host = f"{deploy_name}-db-{database_type}"
database_url = (
f""
f"{database_type}://{settings.DATABASE_USER}:{settings.DATABASE_PASSWORD}"
f"@{database_host}:{database_port}"
f"/{settings.DATABASE_DB}"
)
return make_url(database_url) | 6a3ccd8bacff1f78bbd21728ca45dd7ae74be7d8 | 1,592 |
import unittest
def build_suite():
"""A function."""
#suite = unittest.TestSuite()
#suite.addTest(WidgetTestCase('test_default_size'))
#suite.addTest(WidgetTestCase('test_resize'))
suite = unittest.TestLoader().loadTestsFromTestCase(WidgetTestCase)
return suite | 2984f7a149d224dfc5d0a17b6c8eaed139234c6b | 1,593 |
import numpy as np
def get_quantile(data, percentage, **kwargs):
"""
Assuming the dataset is loaded as type `np.array`, and has shape
(num_samples, num_features).
:param data: Provided dataset, assume each row is a data sample and \
each column is one feature.
:type data: `np.ndarray`
:param percentage: Quantile or sequence of quantiles to compute, \
which must be between 0 and 1 inclusive.
:type percentage: `float` or `np.array` of `float`
:param kwargs: Dictionary of differential privacy arguments \
for computing the specified quantile of each feature across all samples, \
e.g., epsilon, etc.
:type kwargs: `dict`
    :return: A vector of shape (1, num_features) that stores the \
    requested quantile of each feature across all samples.
:rtype: `np.array` of `float`
"""
try:
quantile_vec = np.quantile(data, q=percentage, axis=0)
except Exception as ex:
raise FLException('Error occurred when calculating '
'the quantile. ' + str(ex))
return quantile_vec | ddb02aff1e441696a9a2813d772580c5fdef0ddb | 1,594 |
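A hypothetical usage sketch computing the per-feature median of a small sample matrix:
data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0], [4.0, 40.0]])
print(get_quantile(data, 0.5))  # [ 2.5 25. ]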
def clean_repository_clone_url( repository_clone_url ):
"""Return a URL that can be used to clone a tool shed repository, eliminating the protocol and user if either exists."""
if repository_clone_url.find( '@' ) > 0:
# We have an url that includes an authenticated user, something like:
# http://[email protected]:9009/repos/some_username/column
items = repository_clone_url.split( '@' )
tmp_url = items[ 1 ]
elif repository_clone_url.find( '//' ) > 0:
# We have an url that includes only a protocol, something like:
# http://bx.psu.edu:9009/repos/some_username/column
items = repository_clone_url.split( '//' )
tmp_url = items[ 1 ]
else:
tmp_url = repository_clone_url
return tmp_url | c1d274e907d73aceaa5f1e2c52336edf1638cd8a | 1,595 |
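Worked examples covering the authenticated and protocol-only URL forms from the comments above:
print(clean_repository_clone_url('http://[email protected]:9009/repos/some_username/column'))
# bx.psu.edu:9009/repos/some_username/column
print(clean_repository_clone_url('http://bx.psu.edu:9009/repos/some_username/column'))
# bx.psu.edu:9009/repos/some_username/column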
import torch
def calculate_uncertainty_ins_seg(logits, classes):
"""
    We estimate uncertainty as the L1 distance between 0.0 and the logit prediction in `logits` for the
        foreground class in `classes`.
    Args:
        logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
            class-agnostic prediction, where R is the total number of predicted masks in all images and C is
            the number of foreground classes. The values are logits.
        classes (list): A list of length R that contains either the predicted or ground truth class
            for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
return -(torch.abs(gt_class_logits)) | 794d614d63ca5df06f00ce706f6ca39ae85cfdff | 1,596 |
import torch
def euclidean_distance(x, y):
"""
Compute Euclidean distance between two Variable matrices.
---
param:
x: PyTorch Variable with shape (m, d)
y: PyTorch Variable with shape (n, d)
return:
distance: PyTorch Variable with shape (m, n)
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
distance = xx + yy
    # distance = 1 * distance + (-2) * x @ y.T (keyword form; the old positional signature is deprecated)
    distance.addmm_(x, y.t(), beta=1, alpha=-2)
distance = distance.clamp(min=1e-12).sqrt()
return distance | 03c32aff1d0c31b7d713851e1885d2aa492dad57 | 1,597 |
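A hypothetical sanity check against `torch.cdist` on small random inputs:
a, b = torch.rand(4, 3), torch.rand(5, 3)
print(torch.allclose(euclidean_distance(a, b), torch.cdist(a, b), atol=1e-5))  # True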
from gi.repository import Gio
def gsettings_set(schema, path, key, value):
"""Set value of gsettings schema"""
if path is None:
gsettings = Gio.Settings.new(schema)
else:
gsettings = Gio.Settings.new_with_path(schema, path)
if isinstance(value, list):
return gsettings.set_strv(key, value)
if isinstance(value, int):
return gsettings.set_int(key, value)
if isinstance(value, str):
return gsettings.set_string(key, value) | 29cddb07c10099bc70c1e823d3ffd1b125cf889a | 1,598 |
import numpy as np
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros(shape=(layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters | 374a684dfe54aa0d65ea8f25b61f72a4fc21144e | 1,599 |
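A hypothetical usage sketch for layer sizes 5 -> 4 -> 1:
params = initialize_parameters_deep([5, 4, 1])
print(params['W1'].shape, params['b1'].shape, params['W2'].shape, params['b2'].shape)
# (4, 5) (4, 1) (1, 4) (1, 1)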
def get_nblocks_ntraces(f,nblocks,ntraces,pts,nbheaders,dt,read_blockhead):
"""
Read n blocks from a Varian binary file which may have multiple traces
per block.
Parameters:
* f File object of Varian binary file to read from.
* nblocks Number of blocks to read.
* ntraces Number of traces per block.
* pts Number of points per trace.
* nbheaders Number of block headers in each block.
* dt Data type of data in binary file (real).
* read_blockhead Set to True to read the varian blockheaders(s) into
the returned dictionary. False ignores them.
Returns: dic,data if read_blockhead is True, data if False
"""
# create an empty array to hold data
data = np.empty( (nblocks*ntraces,pts), dtype=dt)
if read_blockhead:
        bdic = [0]*nblocks
# read the data
    for i in range(nblocks):
if read_blockhead:
bdic[i],bdata = get_block_ntraces(f,ntraces,pts,nbheaders,dt,True)
data[i*ntraces:(i+1)*ntraces] = bdata
else:
bdata = get_block_ntraces(f,ntraces,pts,nbheaders,dt,False)
data[i*ntraces:(i+1)*ntraces] = bdata
if read_blockhead:
return bdic,data
else:
return data | b99ddcf842dbc02e1afb9067e198e7e241d1a8c0 | 1,600 |
def calcMedian(list_o_tuples):
"""Given a list of tuples (A, B), where A = category, and B = counts,
returns A that represents the median count value"""
#calc total
ct = 0
for (a, b) in list_o_tuples:
ct += float(b)
med = ct / 2
#find A
ct = 0
for (i, (a, b)) in enumerate(list_o_tuples):
ct += float(b)
if ct > med:
break
#print (i, a, b)
return a | f09a9ac4b1e7a84982bf6b33e4f43e1b2c9f64f6 | 1,601 |
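A quick worked example (total count is 10, so the median falls in the second bucket):
print(calcMedian([('a', 4), ('b', 3), ('c', 3)]))  # b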
def add(n1, n2):
"""Adds the 2 given numbers"""
return n1 + n2 | ca670819dab8230e355e1b236d9cc74ed0b3b868 | 1,602 |
def kwarg_any(kwarg_functions):
"""Resolve kwarg predicates with short-circuit evaluation. This optimization
technique means we do not have to evaluate every predicate if one is already
true.
"""
return any(kwarg_function() for kwarg_function in kwarg_functions) | 3303e1a871bb41920ba0f41e4928e05b6d876c1e | 1,603 |
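A hypothetical sketch of the short-circuit behaviour described in the docstring; the failing predicate is never evaluated:
checks = [lambda: True, lambda: 1 / 0]
print(kwarg_any(checks))  # True -- any() stops before reaching the ZeroDivisionError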
import jax.numpy as jnp
Array = jnp.ndarray  # type alias used for the annotations below
def _behler_parrinello_cutoff_fn(dr: Array,
                                 cutoff_distance: float=8.0) -> Array:
"""Function of pairwise distance that smoothly goes to zero at the cutoff."""
# Also returns zero if the pairwise distance is zero,
# to prevent a particle from interacting with itself.
return jnp.where((dr < cutoff_distance) & (dr > 1e-7),
0.5 * (jnp.cos(jnp.pi * dr / cutoff_distance) + 1), 0) | 707f3521edf1be13c6f3c830404f851a8b606613 | 1,604 |
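A hypothetical usage sketch of the cutoff at a few distances (cutoff_distance defaults to 8.0):
dr = jnp.array([0.0, 1e-6, 4.0, 8.0, 9.0])
print(_behler_parrinello_cutoff_fn(dr))  # [0. ~1. 0.5 0. 0.] -- zero distance and distances >= cutoff map to 0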