content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
from . import graphics
def merge_all_mods(list_of_mods, gfx=None):
"""Merges the specified list of mods, starting with graphics if set to
pre-merge (or if a pack is specified explicitly).
Params:
list_of_mods
a list of the names of mods to merge
gfx
a graphics pack to be merged in
Returns:
A list of status ints for each mod given:
-1: Unmerged
0: Merge was successful, all well
1: Potential compatibility issues, no merge problems
2: Non-fatal error, overlapping lines or non-existent mod etc
3: Fatal error, not returned (rebuilds to previous, rest unmerged)
"""
clear_temp()
if gfx:
add_graphics(gfx)
elif will_premerge_gfx():
add_graphics(graphics.current_pack())
ret_list = []
for i, mod in enumerate(list_of_mods):
status = merge_a_mod(mod)
ret_list.append(status)
if status == 3:
log.i('Mod {}, in {}, could not be merged.'.format(
mod, str(list_of_mods)))
            merged = merge_all_mods(list_of_mods[:i], gfx)
            return merged + [-1]*len(list_of_mods[i:])
return ret_list | c0b6ed6df7116a0abcb0c2674c8bddabd4a52f82 | 3,650,600 |
def pearson_r_p_value(a, b, dim):
"""
    2-tailed p-value associated with Pearson's correlation coefficient.
Parameters
----------
a : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
b : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
dim : str
The dimension to apply the correlation along.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
2-tailed p-value.
See Also
--------
scipy.stats.pearsonr
    xarray.apply_ufunc
"""
return xr.apply_ufunc(_pearson_r_p_value, a, b,
input_core_dims=[[dim], [dim]],
kwargs={'axis': -1}) | d9236eaf1d7315fd61eba35bdd4cdc4f27cb9890 | 3,650,601 |
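# Usage sketch (not part of the original entry): the same xr.apply_ufunc pattern
# with a toy kernel, showing how input_core_dims moves `dim` to the last axis and
# reduces it away. The real _pearson_r_p_value kernel is assumed to work the same way.
import numpy as np
import xarray as xr

def _mean_diff(a, b, axis=-1):
    # stand-in kernel that reduces the core (last) axis
    return (a - b).mean(axis=axis)

da_a = xr.DataArray(np.random.rand(3, 10), dims=("space", "time"))
da_b = xr.DataArray(np.random.rand(3, 10), dims=("space", "time"))
out = xr.apply_ufunc(_mean_diff, da_a, da_b,
                     input_core_dims=[["time"], ["time"]],
                     kwargs={'axis': -1})
print(out.dims)  # ('space',) -- the core dim 'time' has been reduced away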
import datetime
import time
from sqlalchemy import create_engine
def get_ceilometer_usages(date, connection_string):
    """
    Query Ceilometer usage records from OpenStack for the day before `date`.
    """
today = datetime.datetime.combine(date, datetime.datetime.min.time())
yesterday = today - datetime.timedelta(days=1)
engine = create_engine(connection_string)
connection = engine.connect()
query = CEILOMETER_QUERY.format(
from_ts=time.mktime(yesterday.timetuple()),
to_ts=time.mktime(today.timetuple())
)
return connection.execute(query) | b05e7f2024ebf2e2eb23a914da71b834debb66cc | 3,650,602 |
def fit_kij(kij_bounds, eos, mix, datavle=None, datalle=None, datavlle=None,
weights_vle=[1., 1.], weights_lle=[1., 1.],
weights_vlle=[1., 1., 1., 1.], minimize_options={}):
"""
    fit_kij: attempts to fit kij to VLE, LLE, VLLE
Parameters
----------
kij_bounds : tuple
bounds for kij correction
eos : function
cubic eos to fit kij for qmr mixrule
mix: object
binary mixture
datavle: tuple, optional
(Xexp, Yexp, Texp, Pexp)
datalle: tuple, optional
(Xexp, Wexp, Texp, Pexp)
datavlle: tuple, optional
(Xexp, Wexp, Yexp, Texp, Pexp)
weights_vle: list or array_like, optional
weights_vle[0] = weight for Y composition error, default to 1.
weights_vle[1] = weight for bubble pressure error, default to 1.
weights_lle: list or array_like, optional
weights_lle[0] = weight for X (liquid 1) composition error, default to 1.
weights_lle[1] = weight for W (liquid 2) composition error, default to 1.
weights_vlle: list or array_like, optional
weights_vlle[0] = weight for X (liquid 1) composition error, default to 1.
weights_vlle[1] = weight for W (liquid 2) composition error, default to 1.
weights_vlle[2] = weight for Y (vapor) composition error, default to 1.
weights_vlle[3] = weight for equilibrium pressure error, default to 1.
minimize_options: dict
        Dictionary of any additional specifications for scipy minimize_scalar
Returns
-------
    fit : OptimizeResult
        Result of SciPy minimize_scalar
"""
fit = minimize_scalar(fobj_kij, kij_bounds, args=(eos, mix, datavle,
datalle, datavlle, weights_vle, weights_lle,
weights_vlle), **minimize_options)
return fit | 0f2e05a64599b49f70b327e8a69a66647b4c344f | 3,650,603 |
import os
import numpy as np
def pickle_photospheres(photosphere_filenames, kind, meta=None):
"""
Load all model photospheres, parse the points and photospheric structures.
"""
if meta is None:
meta = {
"kind": kind,
"source_directory": os.path.dirname(photosphere_filenames[0])
}
elif not isinstance(meta, dict):
raise TypeError("meta must be a dictionary or None")
# Get the names from the first filename
parsers = {
"marcs": marcs,
"castelli/kurucz": castelli_kurucz
}
try:
parser = parsers[kind.lower()]
except KeyError:
raise ValueError("don't recognise photosphere kind '{0}'; available kinds"
" are {1}".format(kind, ", ".join(parsers.keys())))
_, parameter_names = parser.parse_filename(photosphere_filenames[0], True)
# Get the parameters of all the points
    parameters = np.core.records.fromrecords(
        list(map(parser.parse_filename, photosphere_filenames)), names=parameter_names)
# Verify there are no duplicates.
array_view = parameters.view(float).reshape(parameters.size, -1)
_ = np.ascontiguousarray(array_view).view(np.dtype((np.void,
array_view.dtype.itemsize * array_view.shape[1])))
_, idx = np.unique(_, return_index=True)
if idx.size != parameters.size:
raise ValueError("{} duplicate stellar parameters found".format(
parameters.size - idx.size))
# Now sort the array by the left most columns. Keep track of the indices
# because we will load the photospheres in this order.
i = np.argsort(parameters, order=parameter_names)
parameters = parameters[i]
_, photosphere_columns = parser.parse_photospheric_structure(
photosphere_filenames[0], full_output=True)
d = np.array([parser.parse_photospheric_structure(photosphere_filenames[_]) \
for _ in i])
return (parameters, d, photosphere_columns, meta) | 5c8417899766e2f450cafaad142d9ceb55a9dd29 | 3,650,604 |
import os
import platform
def get_pid_and_server():
"""Find process id and name of server the analysis is running on
Use the platform.uname to find servername instead of os.uname because the latter is not supported on Windows.
"""
pid = os.getpid()
server = platform.uname().node
return f"{pid}@{server}" | 433d1493674e2355554d9bc6e189658e155560de | 3,650,605 |
import numpy as np
from munkres import Munkres
def calc_ac_score(labels_true, labels_pred):
"""calculate unsupervised accuracy score
Parameters
----------
labels_true: labels from ground truth
labels_pred: labels form clustering
Return
-------
ac: accuracy score
"""
nclass = len(np.unique(labels_true))
labels_size = len(labels_true)
mat = labels_size * np.ones((nclass, nclass))
idx = 0
for i in range(labels_size):
mat[labels_pred[i], labels_true[i]] -= 1.0
munkres = Munkres()
mapping = munkres.compute(mat)
ac = 0.0
for i in range(labels_size):
val = mapping[labels_pred[i]][1]
if val == labels_true[i]:
ac += 1.0
ac = ac / labels_size
return ac | 39ca30d3cdcf683dda04d429146775cffd7c0134 | 3,650,606 |
import numpy as np
def wave_ode_gamma_neq0(t, X, *f_args):
"""
Right hand side of the wave equation ODE when gamma > 0
"""
C = f_args[0]
D = f_args[1]
CD = C*D
x, y, z = X
return np.array([-(1./(1.+y) + CD)*x + C*(1+D*CD)*(z-y), x, CD*(z-y)]) | 4b2f5f7b5b4e1c932e0758e9be10fcbc5d9fbbb7 | 3,650,607 |
from typing import Dict
from werkzeug.datastructures import ImmutableMultiDict
def run_workflow(
config: Dict,
form_data: ImmutableMultiDict,
*args,
**kwargs
) -> Dict:
"""Executes workflow and save info to database; returns unique run id."""
# Validate data and prepare run environment
form_data_dict = __immutable_multi_dict_to_nested_dict(
multi_dict=form_data
)
__validate_run_workflow_request(data=form_data_dict)
__check_service_info_compatibility(data=form_data_dict)
document = __init_run_document(data=form_data_dict)
document = __create_run_environment(
config=config,
document=document,
**kwargs
)
# Start workflow run in background
__run_workflow(
config=config,
document=document,
**kwargs
)
response = {'run_id': document['run_id']}
return response | bfa732ceaef6fbd6865e015b9c28da68932fa2db | 3,650,608 |
from typing import List
def insertion_stack(nums: List[int]) -> List[int]:
""" A helper function that sort the data in an ascending order
Args:
nums: The original data
Returns:
a sorted list in ascending order
"""
left = []
right = []
for num in nums:
while left and left[-1] > num:
right.append(left.pop())
left.append(num)
while right:
left.append(right.pop())
return left | 045e28d763ece3dac9e1f60d50a0d51c43b75664 | 3,650,609 |
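# Usage sketch (not part of the original entry): the two-stack insertion scheme
# above returns the input in ascending order.
print(insertion_stack([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]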
def svn_wc_get_pristine_contents(*args):
"""svn_wc_get_pristine_contents(char const * path, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t"""
return _wc.svn_wc_get_pristine_contents(*args) | 5a26e358bbd2a4341bdb1c572f98d419f676a725 | 3,650,610 |
import os
def _FindAllPossibleBrowsers(finder_options, android_platform):
"""Testable version of FindAllAvailableBrowsers."""
if not android_platform:
return []
possible_browsers = []
if finder_options.webview_embedder_apk and not os.path.exists(
finder_options.webview_embedder_apk):
    raise exceptions.PathMissingError(
        'Unable to find apk specified by --webview-embedder-apk=%s' %
        finder_options.webview_embedder_apk)
# Add the exact APK if given.
if _CanPossiblyHandlePath(finder_options.browser_executable):
if not os.path.exists(finder_options.browser_executable):
raise exceptions.PathMissingError(
'Unable to find exact apk specified by --browser-executable=%s' %
finder_options.browser_executable)
package_name = apk_helper.GetPackageName(finder_options.browser_executable)
try:
backend_settings = next(
b for b in ANDROID_BACKEND_SETTINGS if b.package == package_name)
except StopIteration:
raise exceptions.UnknownPackageError(
'%s specified by --browser-executable has an unknown package: %s' %
(finder_options.browser_executable, package_name))
possible_browsers.append(PossibleAndroidBrowser(
'exact',
finder_options,
android_platform,
backend_settings,
finder_options.browser_executable))
# Add the reference build if found.
os_version = dependency_util.GetChromeApkOsVersion(
android_platform.GetOSVersionName())
arch = android_platform.GetArchName()
try:
reference_build = binary_manager.FetchPath(
'chrome_stable', arch, 'android', os_version)
except (binary_manager.NoPathFoundError,
binary_manager.CloudStorageError):
reference_build = None
if reference_build and os.path.exists(reference_build):
# TODO(aiolos): how do we stably map the android chrome_stable apk to the
# correct backend settings?
possible_browsers.append(PossibleAndroidBrowser(
'reference',
finder_options,
android_platform,
android_browser_backend_settings.ANDROID_CHROME,
reference_build))
# Add any other known available browsers.
for settings in ANDROID_BACKEND_SETTINGS:
p_browser = PossibleAndroidBrowser(
settings.browser_type, finder_options, android_platform, settings)
if p_browser.IsAvailable():
possible_browsers.append(p_browser)
return possible_browsers | 50b3be887e940bb3f313d18348969dac49732dd8 | 3,650,611 |
import os
from os.path import exists
import yaml
def getLanguageSpecification(lang):
"""
Return a dictionary which contains the specification of a language.
Specifically, return the key-value pairs defined in the 'lang.yaml' file as
a dictionary, i.e. for the language cpp return the contents of
'plugins/cpp/lang.yaml'.
Raise an IOError if the 'lang.yaml' file does not exist.
Arguments:
lang -- the name of the language
"""
dirPath = '/'.join([os.path.dirname(__file__), PLUGIN_PATH, lang])
if not exists(dirPath):
print('ERROR:', dirPath, 'dir not found.')
raise IOError()
filePath = '/'.join([dirPath, 'lang.yaml'])
if not exists(filePath):
print('ERROR: lang.yaml file not found for language', lang)
raise IOError()
    return yaml.safe_load(open(filePath).read().strip()) | 65954b85cb88b998053141c60b7c184916163b84 | 3,650,612 |
import os
def _is_valid_file(file):
"""Returns whether a file is valid.
This means that it is a file and not a directory, but also that
it isn't an unnecessary dummy file like `.DS_Store` on MacOS.
"""
if os.path.isfile(file):
if not file.startswith('.git') and file not in ['.DS_Store']:
return True
return False | 48f3b2171b6aa2acb93cbcab6eba74f0772fe888 | 3,650,613 |
def create_cache_key(func, key_dict=None, self=None):
"""Get a cache namespace and key used by the beaker_cache decorator.
Example::
from tg import cache
from tg.caching import create_cache_key
namespace, key = create_cache_key(MyController.some_method)
cache.get_cache(namespace).remove(key)
"""
kls = None
imfunc = im_func(func)
if imfunc:
kls = im_class(func)
func = imfunc
cache_key = func.__name__
else:
cache_key = func.__name__
if key_dict:
cache_key += " " + " ".join("%s=%s" % (k, v)
for k, v in key_dict.items())
if not kls and self:
kls = getattr(self, '__class__', None)
if kls:
return '%s.%s' % (kls.__module__, kls.__name__), cache_key
else:
return func.__module__, cache_key | 461fc998a7345d646fdaa61fd36f91c3c250d331 | 3,650,614 |
def longest_common_substring(s, t):
"""
Find the longest common substring between the given two strings
:param s: source string
:type s: str
:param t: target string
:type t: str
:return: the length of the longest common substring
:rtype: int
"""
if s == '' or t == '':
return 0
f = [[0 for _ in range(len(t) + 1)]
for _ in range(len(s) + 1)]
for i in range(len(s)):
for j in range(len(t)):
if s[i] == t[j]:
f[i + 1][j + 1] = f[i][j] + 1
return max(map(max, f)) | 66aef17a117c6cc96205664f4c603594ca496092 | 3,650,615 |
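# Usage sketch (not part of the original entry): only contiguous matches count,
# so the answer here is the length of "cde", not of the common subsequence.
print(longest_common_substring("abcdef", "zcdemn"))  # 3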
def correct_predictions(output_probabilities, targets):
"""
Compute the number of predictions that match some target classes in the
output of a model.
Args:
output_probabilities: A tensor of probabilities for different output
classes.
targets: The indices of the actual target classes.
    Returns:
        A tuple of the predicted classes and the number of correct
        predictions in 'output_probabilities'.
"""
_, out_classes = output_probabilities.max(dim=1)
correct = (out_classes == targets).sum()
return out_classes, correct.item() | 1bff085d95da7b37bb2232b6ac03b034e2bdb6b9 | 3,650,616 |
def resolve_all(anno, task):
"""Resolve all pending annotations."""
return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) | ca127999972644ad25741bc48c78d67aaa4adeec | 3,650,617 |
import socket
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port | d1a514a47a906c946fa3a8cb4312e71bc4f7570e | 3,650,618 |
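# Usage sketch (not part of the original entry): binding to port 0 lets the OS
# pick an unused port; the number can then be handed to a test server.
port = get_free_port()
assert 0 < port < 65536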
def get_diff_list(small_list, big_list):
"""
Get the difference set of the two list.
:param small_list: The small data list.
:param big_list: The bigger data list.
:return: diff_list: The difference set list of the two list.
"""
    # elements that are in big_list but not in small_list
diff_list = list(set(big_list).difference(set(small_list)))
return diff_list | f92d20e6edd1f11ca6436a3ada4a6ba71da37457 | 3,650,619 |
import numpy as np
def blend_weight_arrays(n_weightsA, n_weightsB, value=1.0, weights_pp=None):
"""
Blend two 2d weight arrays with a global mult factor, and per point weight values.
The incoming weights_pp should be a 1d array, as it's reshaped for the number of influences.
Args:
n_weightsA (np.array): Weight array to blend towards n_weightsB.
n_weightsB (np.array): Target weight array to move n_weightsA towards.
value (float): Global mult factor.
weights_pp (list/float): Per point weight values. This should be a 1d array.
Returns (numpy.ndarray): Blended weights array.
"""
if n_weightsA.shape != n_weightsB.shape:
raise ValueError('Shape of both arrays must match: {}, {}'.format(n_weightsA.shape, n_weightsB.shape))
weights_pp = weights_pp or np.ones(n_weightsA.shape[0])
weights_pp = np.repeat(weights_pp, n_weightsA.shape[1]).reshape(-1, n_weightsA.shape[1]) * value
n_weights = np_interp_by_weight(n_weightsA, n_weightsB, weights_pp)
return n_weights | f5167730773718952f48a67970d62a197bd92944 | 3,650,620 |
import numpy as np
def weight_kabsch_dist(x1, x2, weights):
    """
    Compute the Mahalanobis distance between positions x1 and x2 given Kabsch weights (inverse variance)
x1 (required) : float64 array with dimensions (n_atoms,3) of one molecular configuration
x2 (required) : float64 array with dimensions (n_atoms,3) of another molecular configuration
weights (required) : float64 matrix with dimensions (n_atoms, n_atoms) of inverse (n_atoms, n_atoms) covariance
"""
# zero distance
dist = 0.0
    # compute distance as sum over independent (because covar is n_atoms x n_atoms) dimensions
for i in range(3):
disp = x1[:,i] - x2[:,i]
dist += np.dot(disp,np.dot(weights,disp))
# return value
return dist | e03c86875873af3b890fc3cfa799f037c808196e | 3,650,621 |
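# Worked example (not part of the original entry): with identity weights the
# distance reduces to the plain squared Euclidean distance between the two
# configurations.
x1 = np.zeros((4, 3))
x2 = np.ones((4, 3))
print(weight_kabsch_dist(x1, x2, np.eye(4)))  # 12.0 == 4 atoms * 3 coords * 1.0**2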
def calc_color_rarity(color_frequencies: dict) -> float:
"""
Return rarity value normalized to 64.
Value ascending from 0 (most rare) to 64 (most common).
"""
percentages = calc_pixel_percentages(color_frequencies)
weighted_rarity = [PERCENTAGES_NORMALIZED.get(k) * v * 64 for k,v in percentages.items()]
return sum(weighted_rarity) | 54dd3dde36dc02101b5536630e79d3d39fe18aa8 | 3,650,622 |
import numpy as np
def exp_map(x, r, tangent_point=None):
"""
Let \(\mathcal{M}\) be a CCM of radius `r`, and \(T_{p}\mathcal{M}\) the
tangent plane of the CCM at point \(p\) (`tangent_point`).
This function maps a point `x` on the tangent plane to the CCM, using the
Riemannian exponential map.
:param x: np.array, point on the tangent plane (intrinsic coordinates);
:param r: float, radius of the CCM;
:param tangent_point: np.array, origin of the tangent plane on the CCM
(extrinsic coordinates); if `None`, defaults to `[0., ..., 0., r]`.
:return: the exp-map of x to the CCM (extrinsic coordinates).
"""
extrinsic_dim = x.shape[-1] + 1
if tangent_point is None:
tangent_point = np.zeros((extrinsic_dim,))
tangent_point[-1] = np.abs(r)
if isinstance(tangent_point, np.ndarray):
if tangent_point.shape != (extrinsic_dim,) and tangent_point.shape != (1, extrinsic_dim):
raise ValueError('Expected tangent_point of shape ({0},) or (1, {0}), got {1}'.format(extrinsic_dim, tangent_point.shape))
if tangent_point.ndim == 1:
tangent_point = tangent_point[np.newaxis, ...]
if not belongs(tangent_point, r)[0]:
raise ValueError('Tangent point must belong to manifold {}'.format(tangent_point))
else:
raise TypeError('tangent_point must be np.array or None')
if r > 0.:
return SphericalManifold.exp_map(tangent_point, x)
elif r < 0.:
return HyperbolicManifold.exp_map(tangent_point, x)
else:
return x | 2544e6f6054c602d5eae438b405b55dc995d190a | 3,650,623 |
import sys
def get_python_msvcrt_version():
"""Return the Visual C runtime version Python is linked to, as an int"""
python_version = sys.version_info[0:2]
    if python_version < (2, 4):
        return 60
    if python_version < (2, 6):
        return 71
return 90 | dcedf8bdfafd21087c8ef60aa2a2ecd0593c453d | 3,650,624 |
def _get_data_column_label_in_name(item_name):
"""
:param item_name: Name of a group or dataset
:return: Data column label or ``None``
:rtype: str on None
"""
# /1.1/measurement/mca_0 should not be interpreted as the label of a
# data column (let's hope no-one ever uses mca_0 as a label)
if measurement_mca_group_pattern.match(item_name):
return None
data_column_match = measurement_data_pattern.match(item_name)
if not data_column_match:
return None
return data_column_match.group(1) | 58a50f9b28a8dd3c30eb609bbf61eeaf1b821238 | 3,650,625 |
def _auto_backward(loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None,
distop_context=None):
"""
    Modifications are made in place.
"""
act_no_grad_set = _get_no_grad_set(loss, no_grad_set)
    assert isinstance(loss, Variable), "The target loss should be a Variable."
if callbacks is None:
callbacks = [error_clip_callback]
else:
assert (isinstance(callbacks, list))
assert len(loss.shape) == 1 and loss.shape[0] == 1, \
"The loss.shape should be (1L,), but the current loss.shape is {}. " \
"Maybe that you should call fluid.layers.mean to process the current loss.".format(
loss.shape)
program = loss.block.program
with program_guard(program, startup_program):
params_grads = append_backward(
loss,
parameter_list,
act_no_grad_set,
callbacks,
distop_context=distop_context)
return params_grads | f7c08e9677768faf125ccc2a273016312004c225 | 3,650,626 |
import re
def strip_from_ansi_esc_sequences(text):
"""
find ANSI escape sequences in text and remove them
:param text: str
    :return: str, the text with ANSI escape sequences removed
"""
# esc[ + values + control character
# h, l, p commands are complicated, let's ignore them
seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
regex = re.compile(seq_regex)
start = 0
response = ""
for match in regex.finditer(text):
end = match.start()
response += text[start:end]
start = match.end()
response += text[start:len(text)]
return response | 8597654defffbdde33b844a34e95bf7893a36855 | 3,650,627 |
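# Usage sketch (not part of the original entry): colour/cursor escape sequences
# are stripped while the visible text is preserved.
print(strip_from_ansi_esc_sequences("\x1b[31mred\x1b[0m text"))  # 'red text'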
def _concat_columns(args: list):
"""Dispatch function to concatenate DataFrames with axis=1"""
if len(args) == 1:
return args[0]
else:
_lib = cudf if HAS_GPU and isinstance(args[0], cudf.DataFrame) else pd
return _lib.concat(
[a.reset_index(drop=True) for a in args],
axis=1,
)
return None | e60a3d5120e50dbd2d1be5632042e702e5780bc6 | 3,650,628 |
import re
def applyRegexToList(list, regex, separator=' '):
"""Apply a list of regex to list and return result"""
if type(regex) != type(list):
regex = [regex]
regexList = [re.compile(r) for r in regex]
for r in regexList:
list = [l for l in list if r.match(l)]
list = [l.split(separator) for l in list]
return [i[0] for i in list] | eee1edebf361f9516e7b40ba793b0d13ea3070f3 | 3,650,629 |
import os
def pathsplit(path):
""" This version, in contrast to the original version, permits trailing
slashes in the pathname (in the event that it is a directory).
It also uses no recursion """
return path.split(os.path.sep) | 1a89994c8ee9bb1b9ef2b9c90575ea2b0ab21c50 | 3,650,630 |
def GetFileName(path: str) -> str:
"""Get the name of the file from the path
:type path: str
:rtype: str
"""
return splitext(basename(path))[0] | 4aa3a8b75a1ed926c173f9d978504ca2ed653e20 | 3,650,631 |
import re
from functools import reduce
def collapse(individual_refs):
"""Collapse references like [C1,C2,C3,C7,C10,C11,C12,C13] into 'C1-C3, C7, C10-C13'.
Args:
individual_refs (string): Uncollapsed references.
Returns:
string: Collapsed references.
"""
parts = []
for ref in individual_refs:
mtch = re.match(r"(?P<part_prefix>\D+)(?P<number>.+)", ref)
if mtch is not None:
part_prefix = mtch.group("part_prefix")
number = mtch.group("number")
try:
number = int(mtch.group("number"))
except ValueError:
pass
parts.append((part_prefix, number))
parts.sort()
def toRef(part):
return "{}{}".format(part[0], part[1])
def make_groups(accumulator, part):
prev = None
if len(accumulator) > 0:
group = accumulator[-1]
if len(group) > 0:
prev = group[-1]
if (prev != None) and (prev[0] == part[0]) and isinstance(prev[1], int) and ((prev[1] + 1) == part[1]):
group.append(part)
accumulator[-1] = group
else:
accumulator.append([part])
return accumulator
groups = reduce(make_groups, parts, [])
groups = map(lambda g: tuple(map(toRef, g)), groups)
collapsed = ""
for group in groups:
if (len(collapsed) > 1) and (collapsed[-2] != ","):
collapsed += ", "
if len(group) > 2:
collapsed += group[0] + "-" + group[-1]
else:
collapsed += ", ".join(group)
    return collapsed | f4225586d30960cae74123806b8d44ff6f007584 | 3,650,632 |
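# Usage sketch (not part of the original entry): consecutive reference
# designators are folded into ranges, isolated ones are kept as-is.
print(collapse(["C1", "C2", "C3", "C7", "C10", "C11", "C12", "C13"]))
# -> 'C1-C3, C7, C10-C13'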
import os
def get_parent_directory(path):
"""
Get parent directory of the path
"""
return os.path.abspath(os.path.join(path, os.pardir)) | a46ecb9f370076ec975a1823d04b7f2efb0d564d | 3,650,633 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def generate_fig_univariate_categorical(
df_all: pd.DataFrame,
col: str,
hue: str,
nb_cat_max: int = 7,
) -> plt.Figure:
"""
Returns a matplotlib figure containing the distribution of a categorical feature.
If the feature is categorical and contains too many categories, the smallest
categories are grouped into a new 'Other' category so that the graph remains
readable.
Parameters
----------
df_all : pd.DataFrame
The input dataframe that contains the column of interest
col : str
The column of interest
hue : str
The column used to distinguish the values (ex. 'train' and 'test')
nb_cat_max : int
The number max of categories to be displayed. If the number of categories
is greater than nb_cat_max then groups smallest categories into a new
'Other' category
Returns
-------
matplotlib.pyplot.Figure
"""
df_cat = df_all.groupby([col, hue]).agg({col: 'count'})\
.rename(columns={col: "count"}).reset_index()
df_cat['Percent'] = df_cat['count'] * 100 / df_cat.groupby(hue)['count'].transform('sum')
if pd.api.types.is_numeric_dtype(df_cat[col].dtype):
df_cat = df_cat.sort_values(col, ascending=True)
df_cat[col] = df_cat[col].astype(str)
nb_cat = df_cat.groupby([col]).agg({'count': 'sum'}).reset_index()[col].nunique()
if nb_cat > nb_cat_max:
df_cat = _merge_small_categories(df_cat=df_cat, col=col, hue=hue, nb_cat_max=nb_cat_max)
fig, ax = plt.subplots(figsize=(7, 4))
sns.barplot(data=df_cat, x='Percent', y=col, hue=hue,
palette=dict_color_palette, ax=ax)
for p in ax.patches:
ax.annotate("{:.1f}%".format(np.nan_to_num(p.get_width(), nan=0)),
xy=(p.get_width(), p.get_y() + p.get_height() / 2),
xytext=(5, 0), textcoords='offset points', ha="left", va="center")
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Removes plot borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
new_labels = [truncate_str(i.get_text(), maxlen=45) for i in ax.yaxis.get_ticklabels()]
ax.yaxis.set_ticklabels(new_labels)
return fig | 9e6f9b8739b1907f67c864ceaf177f9f1007d35b | 3,650,634 |
import numpy as np
def pt_sharp(x, Ps, Ts, window_half, method='diff'):
"""
Calculate the sharpness of extrema
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
window_half : int
Number of samples in each direction around extrema to use for sharpness estimation
Returns
-------
Psharps : array-like 1d
sharpness of peaks
Tsharps : array-like 1d
sharpness of troughs
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate the sharpness of each peak
P = len(Ps)
Psharps = np.zeros(P)
for e in range(P):
if method == 'deriv':
Edata = x[Ps[e]-window_half: Ps[e]+window_half+1]
Psharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Psharps[e] = np.mean((x[Ps[e]]-x[Ps[e]-window_half],x[Ps[e]]-x[Ps[e]+window_half]))
T = len(Ts)
Tsharps = np.zeros(T)
for e in range(T):
if method == 'deriv':
Edata = x[Ts[e]-window_half: Ts[e]+window_half+1]
Tsharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Tsharps[e] = np.mean((x[Ts[e]-window_half]-x[Ts[e]],x[Ts[e]+window_half]-x[Ts[e]]))
return Psharps, Tsharps | 6d06b9343c71115fc660a298569794933267bd51 | 3,650,635 |
def associate_by_email(strategy, details, user=None, *args, **kwargs):
"""Deny duplicate email addresses for new users except in specific cases
If the incoming email is associated with existing user, authentication
is denied. Exceptions are:
* the existing user does not have associated social login
* the incoming email belongs to a trusted domain
* the duplicate email address check has been disabled in the settings
In the first two cases, the incoming social login is associated with the existing user.
In the third case a separate new user is created.
"""
logger.debug(f"starting association by email; user:{user}; details:{details}")
if user:
return
if settings.ALLOW_DUPLICATE_EMAILS:
return
email = details.get('email')
if not email:
return
backend = kwargs['backend']
User = get_user_model() # noqa
existing_users = User.objects.filter(email__iexact=email).order_by('-date_joined')
if not existing_users:
return
logger.debug(f"found existing users with email '{email}': {existing_users}")
user = existing_users[0]
trusted_email_domains = backend.setting('TRUSTED_EMAIL_DOMAINS', [])
explicitly_trusted = False
if trusted_email_domains:
email_domain = email.split('@')[1]
if email_domain in trusted_email_domains or trusted_email_domains == '*':
explicitly_trusted = True
social_set = user.social_auth.all()
# If the account doesn't have any social logins yet, or if we
# explicitly trust the social media provider, allow the signup.
if explicitly_trusted or not social_set:
return {
'user': user,
}
logger.debug(f"'{email}' already in use by existing user and email domain not trusted")
providers = [a.provider for a in social_set]
strategy.request.other_logins = LoginMethod.objects.filter(provider_id__in=providers)
error_view = AuthenticationErrorView(request=strategy.request)
return error_view.get(strategy.request) | 53eb25acac22317e697b8a3e1e8f0633502ba326 | 3,650,636 |
import datetime
def convert_date(string, report_date, bad_dates_rep, bad_dates_for):
    """
    Converts a date string in format dd/mm/yyyy
    to ISO format yyyy-mm-dd
    """
x = string.split('/')
try:
date = datetime.datetime(int(x[2]),int(x[1]),int(x[0]))
date_str = date.strftime("%Y-%m-%d")
return(date_str)
# Print out cases that do not match input date convention
except (IndexError, ValueError) as errors:
bad_dates_rep.append(report_date)
bad_dates_for.append(string)
return(string)
pass | f84db7bc2edc070a4c6b9c475458081701bca1eb | 3,650,637 |
def render_raw(request, paste, data):
"""Renders RAW content."""
return HttpResponse(paste.content, content_type="text/plain") | 2ec6fdb719e831988a4384e3690d2bec0faad405 | 3,650,638 |
def node_avg():
"""get the avg of the node stats"""
node_raw = ["average", 0, 0, 0]
for node in node_stats():
node_raw[1] += float(node[1])
node_raw[2] += float(node[2])
node_raw[3] += float(node[3])
num = len(node_stats())
node_avg = ["average",
"{:.2f}".format(node_raw[1]/num),
"{:.2f}".format(node_raw[2]/num),
"{:.2f}".format(node_raw[3]/num)]
return node_avg | 985e1f848945d8952ec224a0dd56a02e84b2ea57 | 3,650,639 |
from typing import Union
from cryptography.fernet import Fernet, InvalidToken
def decrypt_vault_password(key: bytes, password: Union[str, bytes]) -> Union[str, bool]:
"""Decrypt and return the given vault password.
:param key: The key to be used during the decryption
:param password: The password to decrypt
"""
if isinstance(password, str):
password = password.encode("utf-8")
f = Fernet(key)
try:
return f.decrypt(password).decode()
except InvalidToken:
return False | 3311b6dc7a9fba4152545ff3ca89881e9ceebb94 | 3,650,640 |
from datetime import datetime
import os
def readData(op,filetype,config):
"""
Reads data from Hive or CSV
"""
try:
if filetype=='csv':
print('csv')
csvU=optimusCsvUtils(config)
hr=datetime.now().hour
today=str(datetime.now().date())
path=os.path.join(config['data']['DIR'],'performance_{}_hr_{}.csv'.format(today,hr))
print(path)
df=csvU.readData(op,path=path)
elif filetype=='hive':
print(HQL)
hive=optimusHiveUtils(config)
df=hive.readData(op,HQL)
elif filetype=='jdbc':
print(HQL)
jdbc=optimusJDBC(config)
df=jdbc.readData(op,HQL)
        elif filetype=='parquet':
            parqU=optimusParquetUtils(config)
            hr=datetime.now().hour
            today=str(datetime.now().date())
            path=os.path.join(config['data']['DIR'],'performance_{}_hr_{}.csv'.format(today,hr))
            df=parqU.readData(op,path=path)
return df
except Exception as e:
logger.critical('Exception occured during Reading the data - {}'.format(e))
raise Exception('Exception occured during Reading the data - {}'.format(e)) | 2bd0086c0ec137a296533ce062169279c565a82e | 3,650,641 |
from typing import Optional
def get_gv_rng_if_none(rng: Optional[rnd.Generator]) -> rnd.Generator:
"""get gym-gridverse module rng if input is None"""
return get_gv_rng() if rng is None else rng | 008bf9d22fb6c9f07816e62c2174c60839a5353f | 3,650,642 |
def fill_name(f):
"""
Attempts to generate an unique id and a parent from a BioPython SeqRecord.
Mutates the feature dictionary passed in as parameter.
"""
global UNIQUE
# Get the type
ftype = f['type']
# Get gene name
gene_name = first(f, "gene")
# Will attempt to fill in the uid from attributes.
uid = ''
# Deal with known types.
if ftype == 'gene':
name = gene_name or first(f, "locus_tag")
uid = name
elif ftype == 'CDS':
count = get_next_count(ftype=ftype, label=gene_name)
prot = first(f, "protein_id") or f"{gene_name}-CDS-{count}"
uid = f"{prot}"
name = prot
elif ftype == 'mRNA':
count = get_next_count(ftype=ftype, label=gene_name)
uid = first(f, "transcript_id") or f"{gene_name}-mRNA-{count}"
name = uid
elif ftype == "exon":
name = gene_name
else:
name = first(f, "organism") or first(f, "transcript_id") or None
uid = first(f, "transcript_id")
# Set the unique identifier.
f['id'] = uid or f"{ftype}-{next(COUNTER)}"
# Set the feature name.
f['name'] = name or ftype
return f | d2351eb509d72b6b2ef34b7c0b01c339acd52677 | 3,650,643 |
from pandas import DataFrame
def run_single_softmax_experiment(beta, alpha):
"""Run experiment with agent using softmax update rule."""
print('Running a contextual bandit experiment')
cb = ContextualBandit()
ca = ContextualAgent(cb, beta=beta, alpha=alpha)
trials = 360
for _ in range(trials):
ca.run()
df = DataFrame(ca.log, columns=('context', 'action', 'reward', 'Q(c,23)',
'Q(c,14)', 'Q(c,8)', 'Q(c,3)'))
# fn = 'softmax_experiment.csv'
# df.to_csv(fn, index=False)
# print('Sequence written in', fn)
# globals().update(locals()) #
return df | 953c07ae1cdc25782f24206a0ce02bf4fc15202b | 3,650,644 |
def available(name):
"""
Returns ``True`` if the specified service is available, otherwise returns
``False``.
We look up the name with the svcs command to get back the FMRI
This allows users to use simpler service names
CLI Example:
.. code-block:: bash
salt '*' service.available net-snmp
"""
cmd = "/usr/bin/svcs -H -o FMRI {0}".format(name)
name = __salt__["cmd.run"](cmd, python_shell=False)
return name in get_all() | 371980f44a348faf83ab32b9d50583fc8e9bae41 | 3,650,645 |
def coincidence_rate(text):
""" Return the coincidence rate of the given text
Args:
text (string): the text to get measured
Returns:
the coincidence rate
"""
ko = 0
# measure the frequency of each letter in the cipher text
for letter in _VOCAB:
count = text.count(letter)
ko = ko + (count * (count - 1))
return ko / (len(text) * (len(text) - 1)) | ca1ca3d8b746ea40ba07af1cb96a194bf14c1d98 | 3,650,646 |
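# Worked example (assumes _VOCAB, which the entry leaves external, is the
# lowercase alphabet): the index of coincidence is sum(n_i*(n_i-1)) / (N*(N-1)).
_VOCAB = "abcdefghijklmnopqrstuvwxyz"
print(round(coincidence_rate("aabb"), 4))  # (2*1 + 2*1) / (4*3) = 0.3333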
import numpy
def convert_bytes_to_ints(in_bytes, num):
"""Convert a byte array into an integer array. The number of bytes forming an integer
is defined by num
:param in_bytes: the input bytes
:param num: the number of bytes per int
:return the integer array"""
dt = numpy.dtype('>i' + str(num))
return numpy.frombuffer(in_bytes, dt) | 38b97fb9d5ecc5b55caf7c9409e4ab4a406a21d7 | 3,650,647 |
def search_spec(spec, search_key, recurse_key):
"""
Recursively scans spec structure and returns a list of values
keyed with 'search_key' or and empty list. Assumes values
are either list or str.
"""
value = []
if search_key in spec and spec[search_key]:
if isinstance(spec[search_key], str):
value.append(spec[search_key])
else:
value += spec[search_key]
if recurse_key in spec and spec[recurse_key]:
for child_spec in spec[recurse_key]:
value += search_spec(child_spec, search_key, recurse_key)
return sorted(value) | 9d89aacc200e205b0e6cbe49592abfd37158836a | 3,650,648 |
import test
def before_class(home=None, **kwargs):
"""Like @test but indicates this should run before other class methods.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_before_class':True})
return test(home=home, **kwargs) | 3b36e448ec76a2c513a1f87dd29b8027b0693780 | 3,650,649 |
import os
import time
import csv
def get_airports(force_download=False):
"""
Gets or downloads the airports.csv in ~/.config/mss and returns all airports within
"""
global _airports, _airports_mtime
file_exists = os.path.exists(os.path.join(MSS_CONFIG_PATH, "airports.csv"))
if _airports and file_exists and os.path.getmtime(os.path.join(MSS_CONFIG_PATH, "airports.csv")) == _airports_mtime:
return _airports
is_outdated = file_exists \
and (time.time() - os.path.getmtime(os.path.join(MSS_CONFIG_PATH, "airports.csv"))) > 60 * 60 * 24 * 30
if (force_download or is_outdated or not file_exists) \
and QtWidgets.QMessageBox.question(None, "Allow download", f"You selected airports to be "
f"{'drawn' if not force_download else 'downloaded (~10 MB)'}." +
("\nThe airports file first needs to be downloaded or updated (~10 MB)."
if not force_download else "") + "\nIs now a good time?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) \
== QtWidgets.QMessageBox.Yes:
download_progress(os.path.join(MSS_CONFIG_PATH, "airports.csv"), "https://ourairports.com/data/airports.csv")
if os.path.exists(os.path.join(MSS_CONFIG_PATH, "airports.csv")):
with open(os.path.join(MSS_CONFIG_PATH, "airports.csv"), "r", encoding="utf8") as file:
_airports_mtime = os.path.getmtime(os.path.join(MSS_CONFIG_PATH, "airports.csv"))
return list(csv.DictReader(file, delimiter=","))
else:
return [] | 345532e2eeef226c6f1883ea234ed3ede03323a2 | 3,650,650 |
import math
import numpy as np
def hellinger_distance_poisson_variants(a_means, b_means, n_samples, sample_distances):
"""
a - The coverage vec for a variant over n_samples
b - The coverage vec for a variant over n_samples
returns average hellinger distance of multiple poisson distributions
"""
# generate distirbutions for each sample
# and calculate divergence between them
# Get the means for each contig
h_geom_mean = []
both_present = []
for i in range(0, n_samples):
# Use this indexing method as zip does not seem to work so well in njit
# Add tiny value to each to avoid division by zero
a_mean = a_means[i] + 1e-6
b_mean = b_means[i] + 1e-6
if a_mean > 1e-6 and b_mean > 1e-6:
both_present.append(i)
if a_mean > 1e-6 or b_mean > 1e-6:
# First component of hellinger distance
h1 = math.exp(-0.5 * ((np.sqrt(a_mean) - np.sqrt(b_mean))**2))
h_geom_mean.append(1 - h1)
if len(h_geom_mean) >= 1:
# convert to log space to avoid overflow errors
d = np.log(np.array(h_geom_mean))
# return the geometric mean
d = np.exp(d.sum() / len(d))
geom_sim = geom_sim_calc(both_present, sample_distances)
d = d ** (1/geom_sim)
else:
d = 1
return d | 555365ea295ef2ff1e18e5c26b6b56b1c939035a | 3,650,651 |
def min_threshold(x, thresh, fallback):
"""Returns x or `fallback` if it doesn't meet the threshold. Note, if you want to turn a hyper "off" below,
set it to "outside the threshold", rather than 0.
"""
return x if (x and x > thresh) else fallback | e92c17aafb8a7c102152d9f31d0a317b285a0ae6 | 3,650,652 |
def get_command(command, meta):
"""Construct the command."""
bits = []
# command to run
bits.append(command)
# connection params
bits.extend(connect_bits(meta))
# database name
if command == 'mysqladmin':
# these commands shouldn't take a database name
return bits
if command == 'pg_restore':
bits.append('--dbname')
if command == 'mysql':
bits.append('--database')
bits.append(meta['path'][1:])
return bits | 0c80072fa70e7943bb7693ad5eb2d24d7078b1cc | 3,650,653 |
def get_common_count(list1, list2):
"""
Get count of common between two lists
:param list1: list
:param list2: list
:return: number
"""
return len(list(set(list1).intersection(list2))) | c149b49e36e81237b775b0de0f19153b5bcf2f99 | 3,650,654 |
def text_present(nbwidget, text="Test"):
"""Check if a text is present in the notebook."""
if WEBENGINE:
def callback(data):
global html
html = data
nbwidget.dom.toHtml(callback)
try:
return text in html
except NameError:
return False
else:
return text in nbwidget.dom.toHtml() | f61f90c6fbbe5251c4839cc3ef82ed1298640345 | 3,650,655 |
def multiFilm(layers, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams):
"""multiFilm(layers, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams={}):
Monte Carlo simulate a spectrum from a multilayer thin film. Layers is a iterable list of \
[material,thickness]. Note the materials must have associated densities."""
tmp = u"MC simulation of a multilayer film [%s] at %0.1f keV%s%s" % (",".join("%0.0f nm of %s" % (1.0e9 * layer[1], layer[0]) for layer in layers), e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildFilm, {"Layers": layers }, xtraParams) | ae586a6860ece7e21f46e221398a462619d16acd | 3,650,656 |
import numpy as np
def value_iteration(R, P, gamma, epsilon=1e-6):
"""
Value iteration for discounted problems.
Parameters
----------
R : numpy.ndarray
array of shape (S, A) contaning the rewards, where S is the number
of states and A is the number of actions
P : numpy.ndarray
array of shape (S, A, S) such that P[s,a,ns] is the probability of
arriving at ns by taking action a in state s.
gamma : double
discount factor
epsilon : double
precision
Returns
--------
tuple (Q, V, n_it) containing the epsilon-optimal Q and V functions,
of shapes (S, A) and (S,), respectively, and n_it, the number of iterations
"""
S, A = R.shape
Q = np.zeros((S, A))
Q_aux = np.full((S, A), np.inf)
n_it = 0
while np.abs(Q - Q_aux).max() > epsilon:
Q_aux = Q
Q = bellman_operator(Q, R, P, gamma)
n_it += 1
V = np.zeros(S)
# numba does not support np.max(Q, axis=1)
for ss in range(S):
V[ss] = Q[ss, :].max()
return Q, V, n_it | 4f8286d7519577f77f86b239c14e948eed513a6a | 3,650,657 |
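# Usage sketch (not part of the original entry). `bellman_operator` is left
# external above; a minimal dense version is assumed here so the example runs.
def bellman_operator(Q, R, P, gamma):
    # Q'(s, a) = R(s, a) + gamma * sum_ns P(s, a, ns) * max_a' Q(ns, a')
    return R + gamma * P.dot(Q.max(axis=1))

R = np.array([[0.0, 1.0], [0.0, 0.0]])
P = np.zeros((2, 2, 2))
P[0, 0, 1] = 1.0  # action 0 moves state 0 -> 1
P[0, 1, 0] = 1.0  # action 1 keeps the agent in state 0 (reward 1)
P[1, :, 1] = 1.0  # state 1 is absorbing with zero reward
Q, V, n_it = value_iteration(R, P, gamma=0.9)
print(np.round(V, 1))  # ~[10.0, 0.0], since V(0) = 1 / (1 - 0.9)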
from unittest.mock import MagicMock
from requests import Response
from requests.structures import CaseInsensitiveDict
def mock_api_response(response_config={}):
"""Create a mock response from the Github API."""
headers = {
'ETag': 'W/"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"',
'Cache-Control': 'public, max-age=60, s-maxage=60',
'Content-Type': 'application/json; charset=utf-8'
}
api_response = MagicMock(spec=Response)
api_response.content_type = 'application/json'
    for k, v in response_config.items():
if k == 'headers':
headers.update(v)
setattr(api_response, k, v)
# Request headers are case insensitive dicts,
# so we need to turn our mock headers into one.
api_response.headers = CaseInsensitiveDict(headers)
return api_response | f79af84cb51ffa063c1db2b70dce99ae61da871a | 3,650,658 |
from os import listdir
def files_with_extension(path: str,extension: str):
"""
Gives a list of the files in the given directory that have the given extension
Parameters
----------
path: str
The full path to the folder where the files are stored
extension: str
The extension of the files
Returns
-------
List[str]
A list containing the files
"""
return [f for f in listdir(path) if f.endswith(extension)] | 18d06303e9b2a734f5fe801847908e2e21b48eae | 3,650,659 |
import argparse
from getpass import getpass
def get_inputs_repl_cluster_or_vol():
"""
This input is for clusters that have replication
"""
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=str,
required=True,
metavar='mvip',
help='MVIP/node name or IP')
parser.add_argument('-u', type=str,
required=True,
metavar='user',
help='username to connect with')
parser.add_argument('-p', type=str,
required=False,
metavar='user_pass',
help='password for user')
parser.add_argument('-o', type=str,
required=False,
metavar='check_opt',
choices=['cluster', 'volume'],
help='option for cluster or volume')
args = parser.parse_args()
mvip = args.m
user = args.u
check_opt = args.o
if not args.p:
user_pass = getpass("Enter password for user {} "
"on cluster {}: ".format(user,
mvip))
else:
user_pass = args.p
return mvip, user, user_pass, check_opt | 0723fabb4c55d85471fbbfa9f5a300ca9b35ac61 | 3,650,660 |
from tqdm import tqdm
import json
def load_jsonl(file_path):
""" Load file.jsonl ."""
data_list = []
with open(file_path, mode='r', encoding='utf-8') as fi:
for idx, line in enumerate(tqdm(fi)):
jsonl = json.loads(line)
data_list.append(jsonl)
return data_list | 58bd0dbfa59d08036aa83e62aab47acd2c40ba6e | 3,650,661 |
from io import StringIO
from datetime import datetime
from urllib.request import urlopen
import numpy as np
import cartopy.crs as ccrs
def aurora_forecast():
"""
Get the latest Aurora Forecast from http://swpc.noaa.gov.
Returns
-------
img : numpy array
The pixels of the image in a numpy array.
img_proj : cartopy CRS
The rectangular coordinate system of the image.
img_extent : tuple of floats
The extent of the image ``(x0, y0, x1, y1)`` referenced in
the ``img_proj`` coordinate system.
origin : str
The origin of the image to be passed through to matplotlib's imshow.
dt : datetime
Time of forecast validity.
"""
# GitHub gist to download the example data from
#url = ('https://gist.githubusercontent.com/belteshassar/'
# 'c7ea9e02a3e3934a9ddc/raw/aurora-nowcast-map.txt')
# To plot the current forecast instead, uncomment the following line
url = 'http://services.swpc.noaa.gov/text/aurora-nowcast-map.txt'
response_text = StringIO(urlopen(url).read().decode('utf-8'))
img = np.loadtxt(response_text)
# Read forecast date and time
response_text.seek(0)
for line in response_text:
if line.startswith('Product Valid At:', 2):
dt = datetime.strptime(line[-17:-1], '%Y-%m-%d %H:%M')
img_proj = ccrs.PlateCarree()
img_extent = (-180, 180, -90, 90)
return img, img_proj, img_extent, 'lower', dt | 04ee88aee75f7ac86063c9a57f4e5155378f9085 | 3,650,662 |
def get_number_trips(grouped_counts):
"""
Gets the frequency of number of trips the customers make
Args:
grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
a get_trips method call
Returns:
Pandas.DataFrame: the dataframe containing the frequencies for each
number of trips
"""
return frequency(grouped_counts.groupby('cust_id').count(), 0) | 4045f10e95fe597e626883c586cc832aa34157c3 | 3,650,663 |
import re
def process_text(text, max_features=200, stopwords=None):
"""Splits a long text into words, eliminates the stopwords and returns
(words, counts) which is necessary for make_wordcloud().
Parameters
----------
text : string
The text to be processed.
max_features : number (default=200)
The maximum number of words.
stopwords : set of strings
The words that will be eliminated.
Notes
-----
There are better ways to do word tokenization, but I don't want to include
all those things.
"""
if stopwords is None:
stopwords = STOPWORDS
d = {}
    flags = re.UNICODE if isinstance(text, str) else 0
for word in re.findall(r"\w[\w']*", text, flags=flags):
if word.isdigit():
continue
word_lower = word.lower()
if word_lower in stopwords:
continue
# Look in lowercase dict.
if word_lower in d:
d2 = d[word_lower]
else:
d2 = {}
d[word_lower] = d2
# Look in any case dict.
d2[word] = d2.get(word, 0) + 1
d3 = {}
for d2 in d.values():
# Get the most popular case.
        first = max(d2.items(), key=item1)[0]
d3[first] = sum(d2.values())
# merge plurals into the singular count (simple cases only)
    for key in list(d3.keys()):
if key.endswith('s'):
key_singular = key[:-1]
if key_singular in d3:
val_plural = d3[key]
val_singular = d3[key_singular]
d3[key_singular] = val_singular + val_plural
del d3[key]
    words = sorted(d3.items(), key=item1, reverse=True)
words = words[:max_features]
maximum = float(max(d3.values()))
for i, (word, count) in enumerate(words):
words[i] = word, count/maximum
return words | 531c8eea539136701289eea5cd462476ba7fefac | 3,650,664 |
def update_graph_map(n):
"""Update the graph rail network mapbox map.
Returns:
go.Figure: Scattermapbox of rail network graph
"""
return get_graph_map() | 826b12616e9c08b05cecef8d44017a1599ed8f98 | 3,650,665 |
def get_party_leads_sql_string_for_state(party_id, state_id):
"""
:type party_id: integer
"""
str = """ select
lr.candidate_id,
c.fullname as winning_candidate,
lr.constituency_id,
cons.name as constituency,
lr.party_id,
lr.max_votes,
(lr.max_votes-sr.votes) as lead,
sr.candidate_id,
loosing_candidate.fullname as runner_up,
loosing_party.name as runner_up_party,
sr.party_id,
ltr.party_id
from latest_results lr
inner join
latest_runners_up as sr
on
sr.constituency_id = lr.constituency_id
inner join
candidate c
on
c.id = lr.candidate_id
inner join
constituency cons
on
cons.id = lr.constituency_id
inner join party loosing_party
on
loosing_party.id = sr.party_id
inner join candidate loosing_candidate
on
loosing_candidate.id = sr.candidate_id
inner join last_time_winners ltr
on
ltr.constituency_id=lr.constituency_id
where
lr.party_id = %s
and
cons.state_id = %s
and
lr.status = 'COUNTING'
order by
lead DESC""" % (party_id, state_id)
return str; | de1e200cf8651626fff04c2011b3ada12b8b08a7 | 3,650,666 |
import requests
import json
import math
import time
def goods_images(goods_url):
"""
    Fetch the customer review ("show order") images for a product
    Parameters:
        goods_url - str, product page URL
    Returns:
        image_urls - list, image URLs
"""
image_urls = []
productId = goods_url.split('/')[-1].split('.')[0]
# 评论url
comment_url = 'https://sclub.jd.com/comment/productPageComments.action'
comment_params = {'productId':productId,
'score':'0',
'sortType':'5',
'page':'0',
'pageSize':'10',
'isShadowSku':'0',
'fold':'1'}
comment_headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36',
'Referer':goods_url,
'Host': 'sclub.jd.com'}
comment_req = requests.get(url=comment_url, params=comment_params, headers=comment_headers, verify=False)
html = json.loads(comment_req.text)
    # total number of review images
imageListCount = html['imageListCount']
    # number of image pages, rounded up
pages = math.ceil(imageListCount / 10)
for page in range(1, pages+1):
# 获取晒图图片url
club_url = 'https://club.jd.com/discussion/getProductPageImageCommentList.action'
now = time.time()
now_str = str(now).split('.')
now = now_str[0] + now_str[-1][:3]
club_params = {'productId':productId,
'isShadowSku':'0',
'page':page,
'pageSize':'10',
'_':now}
club_headers = comment_headers
club_req = requests.get(url=club_url, params=club_params, headers=club_headers, verify=False)
html = json.loads(club_req.text)
for img in html['imgComments']['imgList']:
image_urls.append(img['imageUrl'])
    # remove duplicates
image_urls = list(set(image_urls))
    # prepend the scheme to build full URLs
image_urls = list(map(lambda x: 'http:'+x, image_urls))
return image_urls | 8ed59e295ebd08788f0083be9941ecd8b09f1d84 | 3,650,667 |
def delete_index_list(base_list, index_list):
"""
    Delete the elements of base_list at the positions listed in index_list
    :param base_list: the source list
    :param index_list: indices of the elements to drop
    :return: base_list without the elements at those indices
"""
if base_list and index_list:
return [base_list[i] for i in range(len(base_list)) if (i not in index_list)] | 0dd8960d0efc168df42cabb92147f078da362e5e | 3,650,668 |
def not_found():
"""Page not found."""
return make_response(
render_template("404.html"),
404
) | 3bc56677f760937f1767e0465e4dbd0a11eb41d0 | 3,650,669 |
def _traverseAgg(e, visitor=lambda n, v: None):
"""
Traverse a parse-tree, visit each node
if visit functions return a value, replace current node
"""
res = []
if isinstance(e, (list, ParseResults, tuple)):
res = [_traverseAgg(x, visitor) for x in e]
elif isinstance(e, CompValue):
        for k, val in e.items():
            if val is not None:
res.append(_traverseAgg(val, visitor))
return visitor(e, res) | c436dbb548c6a1b7bc6ddc8ea8770cb953e76a72 | 3,650,670 |
def roll(image, delta):
"""Roll an image sideways
(A more detailed explanation goes here.)
"""
xsize, ysize = image.size
delta = delta % xsize
if delta == 0:
print("the delta was 0!")
return image
part1 = image.crop((0, 0, delta, ysize))
part2 = image.crop((delta, 0, xsize, ysize))
image.paste(part2, (0, 0, xsize-delta, ysize))
image.paste(part1, (xsize-delta, 0, xsize, ysize))
return image | b9ccd9659eedfefa5002f064a23c768d36dfdc0a | 3,650,671 |
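# Usage sketch (not part of the original entry): roll a blank test image by a
# quarter of its width. Requires Pillow.
from PIL import Image
img = Image.new("RGB", (100, 40), "white")
rolled = roll(img, 25)
print(rolled.size)  # (100, 40) -- same size, contents shifted and wrapped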
def make_long_format(path_list, args):
"""Output list of strings in informative line-by-line format like ls -l
Args:
path_list (list of (str, zipfile.Zipinfo)): tuples, one per file
component of zipfile, with relative file path and zipinfo
args (argparse.Namespace): user arguments to script, esp. switches
Returns:
list of str: list of lines to be printed out one at a time
"""
path_str_list = []
if args.human_readable:
# by design of human-readable formatting
max_size_str_len = 4
else:
# find longest length of size str to determine width of string field
max_size_str_len = 0
for path in path_list:
# find longest size string of all paths in pathlist
size_str_len = len(format_file_size(path[1].file_size, args))
if size_str_len > max_size_str_len:
max_size_str_len = size_str_len
for path in path_list:
# extra_data = path[1].extra
# os_creator = path[1].create_system # 3-unix
if path[1].is_dir():
dir_str = "d"
else:
dir_str = "-"
perm_octal = get_zip_perms(path[1])
perm_str = perm_octal2str(perm_octal) + " "
size_str = format_file_size(path[1].file_size, args, max_size_str_len)
size_str += " "
date_str = get_zip_mtime(path[1])
path_str = color_classify(path, args)
path_str_list.append(dir_str + perm_str + size_str + date_str + path_str)
return path_str_list | 68a30c16409c98e92a31b21a911cbca7ca9ef7c4 | 3,650,672 |
import unicodedata
import re
def is_name_a_title(name, content):
"""Determine whether the name property represents an explicit title.
Typically when parsing an h-entry, we check whether p-name ==
e-content (value). If they are non-equal, then p-name likely
represents a title.
However, occasionally we come across an h-entry that does not
provide an explicit p-name. In this case, the name is
automatically generated by converting the entire h-entry content
to plain text. This definitely does not represent a title, and
looks very bad when displayed as such.
To handle this case, we broaden the equality check to see if
content is a subset of name. We also strip out non-alphanumeric
characters just to make the check a little more forgiving.
:param str name: the p-name property that may represent a title
:param str content: the plain-text version of an e-content property
:return: True if the name likely represents a separate, explicit title
"""
def normalize(s):
if not isinstance(s, string_type):
s = s.decode('utf-8')
s = unicodedata.normalize('NFKD', s)
s = s.lower()
s = re.sub('[^a-z0-9]', '', s)
return s
if not content:
return True
if not name:
return False
return normalize(content) not in normalize(name) | 2a8d3191920fba0d92670a3d520bfdf6836dbe69 | 3,650,673 |
import datetime
import traceback
def insertTweet(details, insertDuplicates=True):
""" Adds tweet to database
@param details {Dict} contains tweet details
@param insertDuplicates {Boolean} optional, if true it
will insert even if already exists
"""
try:
if not insertDuplicates:
tweet_results = get_tweet_by_id(details['itemid'])
if tweet_results != None:
logger.info(tweet_results)
return False
tweet = Tweet(
twitter_handle=details['handle'],
tweet_time=datetime.datetime.utcfromtimestamp(details['time']),
tweet_text=details['text'],
data_type=details['type'],
data_id=details['itemid'],
retweets=details['retweets'],
favorites=details['favorites'],
status=1
)
session.add(tweet)
session.commit()
addTweetToHandler(tweet,details['handle'])
return True
except Exception as e:
traceback.print_exc()
traceback.print_stack()
print("ERROR OCCURED WHEN INSERTING TWEET")
print(e)
session.rollback()
return False | e11aba2fecd3d2e0a8f21f25ea1f920512949bdc | 3,650,674 |
import os
import yaml
def dump_yaml(file_path, data):
"""Dump data to a file.
:param file_path: File path to dump data to
:type file_path: String
:param data: Dictionary|List data to dump
:type data: Dictionary|List
"""
with open(os.path.abspath(os.path.expanduser(file_path)), "w") as f:
yaml.safe_dump(data, f, default_flow_style=False)
return file_path | f1210295f5f947c51df6ef80fc479723e157a84c | 3,650,675 |
from typing import OrderedDict
import numpy as np
def return_embeddings(embedding: str, vocabulary_size: int, embedding_dim: int,
                      worddicts: OrderedDict) -> np.ndarray:
    """Create array of word embeddings."""
    word_embeddings = np.zeros((vocabulary_size, embedding_dim))
with open(embedding, 'r') as f:
for line in f:
words=line.split()
word = words[0]
vector = words[1:]
len_vec = len(vector)
if(len_vec>300):
diff = len_vec-300
word = word.join(vector[:diff])
vector = vector[diff:]
if word in worddicts and worddicts[word] < vocabulary_size:
vector = [float(x) for x in vector]
word_embeddings[worddicts[word], :] = vector[0:300]
return word_embeddings | 86379e2cc9c343733464bea207dc3f41b4dd7601 | 3,650,676 |
import sympy
def symLink(twist, dist, angle, offset):
"""
Transform matrix of this link with DH parameters.
(Use symbols)
"""
twist = twist * sympy.pi / 180
T1 = sympy.Matrix([
[1, 0, 0, dist],
[0, sympy.cos(twist), -sympy.sin(twist), 0],
[0, sympy.sin(twist), sympy.cos(twist), 0],
[0, 0, 0, 1]])
# T1[sympy.abs(T1) < 1e-3] = 0
T2 = sympy.Matrix([
[sympy.cos(angle), -sympy.sin(angle), 0, 0],
[sympy.sin(angle), sympy.cos(angle), 0, 0],
[0, 0, 1, offset],
[0, 0, 0, 1]])
return T1 * T2 | a6e2ac09866f2b54ffb33da681ba9d19e74e57f0 | 3,650,677 |
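# Usage sketch (not part of the original entry): DH transform of one revolute
# joint with a 90-degree twist and a symbolic joint angle.
theta = sympy.symbols('theta')
T = symLink(90, 0.5, theta, 0)
print(T[0, 3])  # 0.5 -- the link length along the rotated x axis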
import aiohttp
from typing import Any, Dict, Sequence, Tuple
from xml.etree import ElementTree as ET
from defusedxml import ElementTree as DET
async def _parse_action_body(service: UpnpServerService, request: aiohttp.web.Request) -> Tuple[str, Dict[str, Any]]:
"""Parse action body."""
# Parse call.
soap = request.headers.get("SOAPAction", "").strip('"')
try:
_, action_name = soap.split("#")
data = await request.text()
root_el: ET.Element = DET.fromstring(data)
body_els: Sequence[ET.Element] = root_el.find("s:Body", NAMESPACES)
rpc_el = body_els[0]
except Exception as exc:
raise aiohttp.web.HTTPBadRequest(reason="InvalidSoap") from exc
if action_name not in service.actions:
raise aiohttp.web.HTTPBadRequest(reason="InvalidAction")
kwargs: Dict[str, Any] = {}
action = service.action(action_name)
for arg in rpc_el:
action_arg = action.argument(arg.tag, direction="in")
if action_arg is None:
raise aiohttp.web.HTTPBadRequest(reason="InvalidArg")
state_var = action_arg.related_state_variable
kwargs[arg.tag] = state_var.coerce_python(arg.text)
return action_name, kwargs | d5f390d956d726ffca0d37891815b8ccf488a826 | 3,650,678 |
import json
def get_tc_json():
"""Get the json for this testcase."""
try:
with open(GLOBAL_INPUT_JSON_PATH) as json_file:
tc = json.load(json_file)
except Exception:
        return_error('Could not load custom_validator_input.json')
return tc | de19278f5edb415d40e383d2ad08dfc6e968cb81 | 3,650,679 |
import numpy as np
def dualgauss(x, x1, x2, w1, w2, a1, a2, c=0):
"""
Sum of two Gaussian distributions. For curve fitting.
Parameters
----------
x: np.array
Axis
x1: float
Center of 1st Gaussian curve
x2: float
Center of 2nd Gaussian curve
w1: float
Width of 1st Gaussian curve
w2: float
Width of 2nd Gaussian curve
a1: float
Amplitude of 1st Gaussian curve
a2: float
Amplitude of 2nd Gaussian curve
c: float, optional
Offset, defaults to 0
"""
return a1*np.exp(-0.5*((x-x1)/w1)**2)+a2*np.exp(-0.5*((x-x2)/w2)**2) + c | d60d63ad0776aa6d5babfe5e963503f18dca0c3e | 3,650,680 |
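Since the docstring says dualgauss is meant for curve fitting, here is a sketch of fitting it to synthetic data with scipy.optimize.curve_fit (all parameter values are made up).

import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-10, 10, 400)
true_params = (-2.0, 3.0, 1.0, 1.5, 2.0, 1.0, 0.1)
rng = np.random.default_rng(0)
y = dualgauss(x, *true_params) + rng.normal(0.0, 0.05, x.size)

# A rough initial guess helps curve_fit converge on multi-peak models.
p0 = (-1.0, 2.5, 1.0, 1.0, 1.5, 1.0, 0.0)
popt, pcov = curve_fit(dualgauss, x, y, p0=p0)
print(popt)  # should be close to true_params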
def pdg_format3( value , error1 , error2 , error3 , latex = False , mode = 'total' ) :
"""Round value/error accoridng to PDG prescription and format it for print
@see http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf
@see section 5.3 of doi:10.1088/0954-3899/33/1/001
Quote:
The basic rule states that
- if the three highest order digits of the error lie between 100 and 354, we round to two significant digits.
- If they lie between 355 and 949, we round to one significant digit.
- Finally, if they lie between 950 and 999, we round up to 1000 and keep two significant digits.
In all cases, the central value is given with a precision that matches that of the error.
    >>> value, error1, error2, error3 = ...
    >>> print ' Rounded value/error is %s ' % pdg_format3 ( value , error1 , error2 , error3 , True )
"""
error = ref_error ( mode , error1 , error2 , error3 )
val , err , q , ecase = pdg_round__ ( value , error )
if ecase <= 0 or ( not isfinite ( error1 ) ) or ( not isfinite ( error2 ) ) or ( not isfinite ( error3 ) ) :
if not isfinite ( val ) :
return ( '%+g \\pm %-g \\pm %-g \\pm %-g ' % ( val , error1 , error2 , error3 ) ) if latex else \
( '%+g +/- %-g +/- %-g +/- %-g' % ( val , error1 , error2 , error3 ) )
else :
qv , bv = _frexp10_ ( val )
if 0 != bv :
scale = 1.0 / 10**bv
                if latex : return '(%+.2f \\pm %-s \\pm %-s \\pm %-s)\\times 10^{%d}' % ( qv , error1 * scale , error2 * scale , error3 * scale , bv )
                else     : return '(%+.2f +/- %-s +/- %-s +/- %-s)*10^{%d} '          % ( qv , error1 * scale , error2 * scale , error3 * scale , bv )
else :
if latex : return ' %+.2f \\pm %-s \\pm %-s \\pm %-s ' % ( qv , error1 , error2 , error3 )
else : return ' %+.2f +/- %-s +/- %-s +/- %-s ' % ( qv , error1 , error2 , error3 )
qe , be = _frexp10_ ( error )
a , b = divmod ( be , 3 )
if 1 == ecase :
err1 = round_N ( error1 , 2 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 2 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 2 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 1
elif 1 == b :
nd = 3
a += 1
elif 2 == b :
a += 1
nd = 2
elif 2 == ecase :
err1 = round_N ( error1 , 1 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 1 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 1 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 0
if 2 == a % 3 :
nd = 3
a = a + 1
elif 1 == b :
nd = 2
a += 1
elif 2 == b :
nd = 1
a += 1
elif 3 == ecase :
err1 = round_N ( error1 , 2 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 2 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 2 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 0
if 2 == a % 3 :
nd = 3
a = a + 1
elif 1 == b :
nd = 2
a += 1
elif 2 == b :
nd = 1
a += 1
if 0 == a :
if latex: fmt = '(%%+.%df \\pm %%.%df \\pm %%.%df \\pm %%.%df)' % ( nd , nd , nd , nd )
        else    : fmt = ' %%+.%df +/- %%.%df +/- %%.%df +/- %%.%df ' % ( nd , nd , nd , nd )
        return fmt % ( val , err1 , err2 , err3 )
if latex: fmt = '(%%+.%df \\pm %%.%df \\pm %%.%df \\pm %%.%df)\\times 10^{%%d}' % ( nd , nd , nd , nd )
else : fmt = '(%%+.%df +/- %%.%df +/- %%.%df +/- %%.%df)*10^{%%d}' % ( nd , nd , nd , nd )
scale = 1.0/10**(3*a)
return fmt % ( val * scale , err1 * scale , err2 * scale , err3 * scale , 3 * a ) | 9d75007e19d60caac14a2a830800e7db215c0de6 | 3,650,681 |
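The helpers ref_error, pdg_round__, _frexp10_, round_N and isclose are defined elsewhere. As a self-contained illustration of just the three-highest-digits rule quoted in the docstring (not the full formatting logic above), a sketch could look like this:

import math

def pdg_sig_digits(error):
    """Sketch of the PDG 354/949 rule: return (rounded_error, n_significant_digits)."""
    if error <= 0 or not math.isfinite(error):
        return error, 2
    exponent = math.floor(math.log10(error))
    three_digits = int(round(error / 10 ** (exponent - 2)))  # three highest-order digits, 100..999
    if three_digits <= 354:
        n_digits = 2
    elif three_digits <= 949:
        n_digits = 1
    else:                       # 950..999: round up to 1000 and keep two significant digits
        return 10.0 ** (exponent + 1), 2
    decimals = -exponent + (n_digits - 1)
    return round(error, decimals), n_digits

print(pdg_sig_digits(0.827))   # -> (0.8, 1)
print(pdg_sig_digits(0.1234))  # -> (0.12, 2)
print(pdg_sig_digits(0.096))   # -> (0.1, 2)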
import datetime
import lunardate
def getChinaHoliday(t):
    """Find the Chinese holiday nearest to the input date and return the distance in days."""
    date_time = datetime.datetime.strptime(t, '%d %B %Y')
    y = date_time.year
    # Chinese solar-calendar (Gregorian) holidays
    sh = [
        (y, 1, 1),   # New Year's Day
        (y, 4, 5),   # Qingming Festival (Tomb-Sweeping Day)
        (y, 5, 1),   # Labour Day
        (y, 10, 1)   # National Day
    ]
    # Chinese lunar-calendar holidays
    lh = [
        (y, 1, 1),   # Lunar New Year's Day (Spring Festival)
        (y, 5, 5),   # Dragon Boat Festival
        (y, 8, 15)   # Mid-Autumn Festival
    ]
    res = 365
    for h in sh:
        hd = datetime.datetime(h[0], h[1], h[2], 0, 0, 0)
        ds = (date_time-hd).days
        if abs(ds) < res:  # nearest solar-calendar holiday to the input date
            res = abs(ds)
    for h in lh:
        ld = lunardate.LunarDate(h[0], h[1], h[2], 0).toSolarDate()
        hd = datetime.datetime(ld.year, ld.month, ld.day, 0, 0, 0)
        ds = (date_time-hd).days
        if abs(ds) < res:  # nearest lunar-calendar holiday to the input date
            res = abs(ds)
    return res | bc9520f56135d86cf196bfe30bde0ea645377f45 | 3,650,682
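A quick usage sketch (assumes the lunardate package is installed and the function above is in scope); the dates are arbitrary examples.

# National Day itself is at distance 0.
print(getChinaHoliday('01 October 2020'))   # -> 0
# An ordinary date reports the gap in days to whichever holiday is closest.
print(getChinaHoliday('20 February 2021'))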
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
:param str mimetype: MIME type
:returns: 4 element tuple for MIME type, subtype, suffix and parameters
:rtype: tuple
Example:
>>> parse_mimetype('text/html; charset=utf-8')
('text', 'html', '', {'charset': 'utf-8'})
"""
if not mimetype:
return '', '', '', {}
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return mtype, stype, suffix, params | a9abfde73528e6f76cca633efe3d4c881dccef82 | 3,650,683 |
def terraform_state_bucket(config):
"""Get the bucket name to be used for the remote Terraform state
Args:
config (dict): The loaded config from the 'conf/' directory
Returns:
string: The bucket name to be used for the remote Terraform state
"""
# If a bucket name is specified for the remote Terraform state, we can assume the bucket
# should NOT be created
default_name = DEFAULT_TERRAFORM_STATE_BUCKET_SUFFIX.format(
config['global']['account']['prefix']
)
if 'terraform' not in config['global']:
return default_name, True # Use the default name and create the bucket
bucket_name = config['global']['terraform'].get(
'bucket_name',
default_name
)
return bucket_name, bucket_name == default_name | 443ae393896d180f3e419db7a6b7e346dca0655c | 3,650,684 |
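A usage sketch with a minimal config dictionary; the prefix and custom bucket name are hypothetical, and DEFAULT_TERRAFORM_STATE_BUCKET_SUFFIX is assumed to be defined in the surrounding module.

config = {'global': {'account': {'prefix': 'acme'}}}
name, is_default = terraform_state_bucket(config)
print(name, is_default)   # default name derived from the prefix, True

config['global']['terraform'] = {'bucket_name': 'my-custom-state-bucket'}
name, is_default = terraform_state_bucket(config)
print(name, is_default)   # -> my-custom-state-bucket False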
def get_binary_matrix(gene_expr, libraries):
"""
Get binary matrix with genes as rows and pathways as columns.
If a gene is found in a given pathway, it is given a value of
1. Else, 0. Only the list of genes in common between that found
in the gene set libraries and the current dataset are used.
"""
function_to_genes = {}
set_genes = set()
for lib in libraries:
f2g, genes = gene_set_dictionaries(lib)
function_to_genes.update(f2g)
set_genes = set_genes | set(genes)
common_genes = list(set_genes & set(gene_expr))
binary_matrix = gs_binary_matrix(function_to_genes, set_genes).loc[common_genes]
return binary_matrix | 53f39909efc1dfb083cba734a01f77d181f4c36c | 3,650,685 |
def get_tip_downvotes(tips_id):
"""
GET function for retrieving all User objects that have downvoted a tip
"""
tip = Tips.objects.get(id=tips_id)
tips_downvotes = (tip.to_mongo())["downvotes"]
tips_downvotes_list = [
User.objects.get(id=str(user)).to_mongo() for user in tips_downvotes
]
response = {"users": tips_downvotes_list}
return create_response(data=response) | b528be2bd74169a4baff14ecb473ef12d8554be9 | 3,650,686 |
import networkx as nx
from typing import List
from typing import Dict
def get_placements(
big_graph: nx.Graph, small_graph: nx.Graph, max_placements=100_000
) -> List[Dict]:
"""Get 'placements' mapping small_graph nodes onto those of `big_graph`.
This function considers monomorphisms with a restriction: we restrict only to unique set
of `big_graph` qubits. Some monomorphisms may be basically
the same mapping just rotated/flipped which we purposefully exclude. This could
exclude meaningful differences like using the same qubits but having the edges assigned
differently, but it prevents the number of placements from blowing up.
Args:
big_graph: The parent, super-graph. We often consider the case where this is a
nx.Graph representation of a Device whose nodes are `cirq.Qid`s like `GridQubit`s.
small_graph: The subgraph. We often consider the case where this is a NamedTopology
graph.
max_placements: Raise a value error if there are more than this many placement
possibilities. It is possible to use `big_graph`, `small_graph` combinations
that result in an intractable number of placements.
Raises:
ValueError: if the number of placements exceeds `max_placements`.
Returns:
A list of placement dictionaries. Each dictionary maps the nodes in `small_graph` to
nodes in `big_graph` with a monomorphic relationship. That's to say: if an edge exists
in `small_graph` between two nodes, it will exist in `big_graph` between the mapped nodes.
"""
matcher = nx.algorithms.isomorphism.GraphMatcher(big_graph, small_graph)
# de-duplicate rotations, see docstring.
dedupe = {}
for big_to_small_map in matcher.subgraph_monomorphisms_iter():
dedupe[frozenset(big_to_small_map.keys())] = big_to_small_map
if len(dedupe) > max_placements:
# coverage: ignore
raise ValueError(
f"We found more than {max_placements} placements. Please use a "
f"more constraining `big_graph` or a more constrained `small_graph`."
)
small_to_bigs = []
for big in sorted(dedupe.keys()):
big_to_small_map = dedupe[big]
small_to_big_map = {v: k for k, v in big_to_small_map.items()}
small_to_bigs.append(small_to_big_map)
return small_to_bigs | fad71c888639ba29c0b0d2d61ddeff2a2c1d8653 | 3,650,687 |
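A small self-contained sketch using plain networkx graphs, assuming get_placements above is in scope: place a 3-node path onto a 2x3 grid.

import networkx as nx

big = nx.grid_2d_graph(2, 3)   # 6 nodes arranged on a 2x3 grid
small = nx.path_graph(3)       # a 3-node line

placements = get_placements(big, small)
print(len(placements))
print(placements[0])           # e.g. {0: (0, 0), 1: (0, 1), 2: (0, 2)}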
import subprocess
def make_static_server_url_stop(root, host=HOST, port=PORT):
"""start a tornado static file server"""
server_args = [
"python",
str(PA11Y / "serve.py"),
f"--host={host}",
f"--port={port}",
f"--path={root}",
]
url = f"http://{host}:{port}/"
def stop():
server.terminate()
server.wait()
server = subprocess.Popen(server_args)
return server, url, stop | 9c6e465467b283d8bb27b7308e6271e728c9f491 | 3,650,688 |
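A usage sketch of the start/stop pattern; it assumes the PA11Y/serve.py script and the HOST/PORT constants exist as in the surrounding module, and the served path is hypothetical.

server, url, stop = make_static_server_url_stop("build/html", host="127.0.0.1", port=8123)
try:
    print("serving at", url)   # -> http://127.0.0.1:8123/
    # ... run checks against `url` here ...
finally:
    stop()                     # terminate the subprocess and wait for it to exit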
import requests
import urllib
import http
def request_url(url):
"""
get the resource associated with a url.
"""
try:
# -- todo; eliminate pesky assignment so can be put into chain of Ok then's.
user_agent = 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.43 Safari/537.31'
response = requests.get(
urllib.parse.unquote(url), headers = {
'User-agent': user_agent,
'Connection': 'close'
}, timeout = 30)
return Ok(response)
except http.client.BadStatusLine as err:
return Err({
'message': "%s returned an unrecognised status." % (url, ),
'code': 404
})
except requests.exceptions.ConnectionError as err:
return Err({
'message': "%s refused the connection." % (url, ),
'code': 404
})
except requests.exceptions.Timeout as err:
return Err({
'message': "%s timed out." % (url, ),
'code': 404
})
except Exception as err:
return Err(err) | 0d7df45e038c65b8b2760bd0304c242d4af294c5 | 3,650,689 |
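Ok and Err are not defined in this snippet; they act as a simple result wrapper. A minimal stand-in (an assumption, not the project's actual classes) and a hypothetical call:

class Ok:
    def __init__(self, value):
        self.value = value
    def is_ok(self):
        return True

class Err:
    def __init__(self, error):
        self.error = error
    def is_ok(self):
        return False

result = request_url("https://example.com")
if result.is_ok():
    print(result.value.status_code)
else:
    print(result.error)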
import inspect
import six
def _filter_baseanalysis_kwargs(function, kwargs):
"""
create two dictionaries with kwargs separated for function and AnalysisBase
Parameters
----------
function : callable
function to be called
kwargs : dict
keyword argument dictionary
Returns
-------
base_args : dict
dictionary of AnalysisBase kwargs
kwargs : dict
kwargs without AnalysisBase kwargs
Raises
------
ValueError : if ``function`` has the same kwargs as ``BaseAnalysis``
"""
base_argspec = inspect.getargspec(AnalysisBase.__init__)
n_base_defaults = len(base_argspec.defaults)
base_kwargs = {name: val
for name, val in zip(base_argspec.args[-n_base_defaults:],
base_argspec.defaults)}
argspec = inspect.getargspec(function)
for base_kw in six.iterkeys(base_kwargs):
if base_kw in argspec.args:
            raise ValueError(
                "argument name '{}' clashes with AnalysisBase argument. "
                "Not allowed are: {}".format(base_kw, list(base_kwargs.keys())))
base_args = {}
for argname, default in six.iteritems(base_kwargs):
base_args[argname] = kwargs.pop(argname, default)
return base_args, kwargs | a674c640618ebba3d2c29fec0458773344c84be6 | 3,650,690 |
import numpy as np
def torch_to_flax(torch_params, get_flax_keys):
"""Convert PyTorch parameters to nested dictionaries"""
def add_to_params(params_dict, nested_keys, param, is_conv=False):
if len(nested_keys) == 1:
key, = nested_keys
params_dict[key] = np.transpose(param, (2, 3, 1, 0)) if is_conv else np.transpose(param)
else:
assert len(nested_keys) > 1
first_key = nested_keys[0]
if first_key not in params_dict:
params_dict[first_key] = {}
add_to_params(params_dict[first_key], nested_keys[1:], param, ('conv' in first_key and \
nested_keys[-1] != 'bias'))
def add_to_state(state_dict, keys, param):
key_str = ''
for k in keys[:-1]:
key_str += f"/{k}"
if key_str not in state_dict:
state_dict[key_str] = {}
state_dict[key_str][keys[-1]] = param
flax_params, flax_state = {}, {}
    for key, tensor in torch_params.items():
        flax_keys = get_flax_keys(key.split('.'))
        if flax_keys[-1] is None:
            continue
if flax_keys[-1] == 'mean' or flax_keys[-1] == 'var':
add_to_state(flax_state, flax_keys, tensor.detach().numpy())
else:
add_to_params(flax_params, flax_keys, tensor.detach().numpy())
return flax_params, flax_state | fd87617e3e0db491ff313218883961a1c2aa9d0f | 3,650,691 |
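A hedged usage sketch: the key-mapping function below is made up for a single Linear layer and is not the project's real get_flax_keys.

import torch

def demo_get_flax_keys(torch_keys):
    # Hypothetical mapping: 'weight' -> ('Dense_0', 'kernel'), 'bias' -> ('Dense_0', 'bias')
    name = {'weight': 'kernel', 'bias': 'bias'}[torch_keys[-1]]
    return ['Dense_0', name]

layer = torch.nn.Linear(4, 2)
params, state = torch_to_flax(layer.state_dict(), demo_get_flax_keys)
print(params['Dense_0']['kernel'].shape)   # -> (4, 2), transposed from torch's (2, 4)
print(state)                               # -> {} (no batch-norm statistics in this layer)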
from typing import Union
from pathlib import Path
from typing import Optional
def subset_shape(
ds: Union[xarray.DataArray, xarray.Dataset],
shape: Union[str, Path, gpd.GeoDataFrame],
raster_crs: Optional[Union[str, int]] = None,
shape_crs: Optional[Union[str, int]] = None,
buffer: Optional[Union[int, float]] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
first_level: Optional[Union[float, int]] = None,
last_level: Optional[Union[float, int]] = None,
) -> Union[xarray.DataArray, xarray.Dataset]:
"""Subset a DataArray or Dataset spatially (and temporally) using a vector shape and date selection.
Return a subset of a DataArray or Dataset for grid points falling within the area of a Polygon and/or
MultiPolygon shape, or grid points along the path of a LineString and/or MultiLineString. If the shape
consists of several disjoint polygons, the output is cut to the smallest bbox including all
polygons.
Parameters
----------
ds : Union[xarray.DataArray, xarray.Dataset]
Input values.
shape : Union[str, Path, gpd.GeoDataFrame]
Path to shape file, or directly a geodataframe. Supports formats compatible with geopandas.
raster_crs : Optional[Union[str, int]]
EPSG number or PROJ4 string.
shape_crs : Optional[Union[str, int]]
EPSG number or PROJ4 string.
buffer : Optional[Union[int, float]]
Buffer the shape in order to select a larger region stemming from it. Units are based on the shape degrees/metres.
start_date : Optional[str]
Start date of the subset.
Date string format -- can be year ("%Y"), year-month ("%Y-%m") or year-month-day("%Y-%m-%d").
Defaults to first day of input data-array.
end_date : Optional[str]
End date of the subset.
Date string format -- can be year ("%Y"), year-month ("%Y-%m") or year-month-day("%Y-%m-%d").
Defaults to last day of input data-array.
first_level : Optional[Union[int, float]]
First level of the subset.
Can be either an integer or float.
Defaults to first level of input data-array.
last_level : Optional[Union[int, float]]
Last level of the subset.
Can be either an integer or float.
Defaults to last level of input data-array.
Returns
-------
Union[xarray.DataArray, xarray.Dataset]
A subset of `ds`
Notes
-----
If no CRS is found in the shape provided (e.g. RFC-7946 GeoJSON, https://en.wikipedia.org/wiki/GeoJSON),
assumes a decimal degree datum (CRS84). Be advised that EPSG:4326 and OGC:CRS84 are not identical as axis order of
lat and long differs between the two (for more information, see: https://github.com/OSGeo/gdal/issues/2035).
Examples
--------
>>> import xarray as xr # doctest: +SKIP
>>> from clisops.core.subset import subset_shape # doctest: +SKIP
>>> pr = xr.open_dataset(path_to_pr_file).pr # doctest: +SKIP
...
# Subset data array by shape
>>> prSub = subset_shape(pr, shape=path_to_shape_file) # doctest: +SKIP
...
# Subset data array by shape and single year
>>> prSub = subset_shape(pr, shape=path_to_shape_file, start_date='1990-01-01', end_date='1990-12-31') # doctest: +SKIP
...
# Subset multiple variables in a single dataset
>>> ds = xr.open_mfdataset([path_to_tasmin_file, path_to_tasmax_file]) # doctest: +SKIP
>>> dsSub = subset_shape(ds, shape=path_to_shape_file) # doctest: +SKIP
"""
wgs84 = CRS(4326)
# PROJ4 definition for WGS84 with longitudes ranged between -180/+180.
wgs84_wrapped = CRS.from_string(
"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs lon_wrap=180"
)
if isinstance(ds, xarray.DataArray):
ds_copy = ds._to_temp_dataset()
else:
ds_copy = ds.copy()
if isinstance(shape, gpd.GeoDataFrame):
poly = shape.copy()
else:
poly = gpd.GeoDataFrame.from_file(shape)
if buffer is not None:
poly.geometry = poly.buffer(buffer)
# Get the shape's bounding box.
minx, miny, maxx, maxy = poly.total_bounds
lon_bnds = (minx, maxx)
lat_bnds = (miny, maxy)
# If polygon doesn't cross prime meridian, subset bbox first to reduce processing time
# Only case not implemented is when lon_bnds cross the 0 deg meridian but dataset grid has all positive lons
try:
ds_copy = subset_bbox(ds_copy, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
except ValueError as e:
raise ValueError(
"No grid cell centroids found within provided polygon bounding box. "
'Try using the "buffer" option to create an expanded area.'
) from e
except NotImplementedError:
pass
lon = get_lon(ds_copy)
lat = get_lat(ds_copy)
if start_date or end_date:
ds_copy = subset_time(ds_copy, start_date=start_date, end_date=end_date)
if first_level or last_level:
ds_copy = subset_level(ds_copy, first_level=first_level, last_level=last_level)
# Determine whether CRS types are the same between shape and raster
if shape_crs is not None:
try:
shape_crs = CRS.from_user_input(shape_crs)
except ValueError:
raise
else:
try:
shape_crs = CRS(poly.crs)
except CRSError:
poly.crs = wgs84
shape_crs = wgs84
wrap_lons = False
if raster_crs is not None:
try:
raster_crs = CRS.from_user_input(raster_crs)
except ValueError:
raise
else:
if np.min(lat_bnds) < -90 or np.max(lat_bnds) > 90:
raise ValueError("Latitudes exceed domain of WGS84 coordinate system.")
if np.min(lon_bnds) < -180 or np.max(lon_bnds) > 180:
raise ValueError("Longitudes exceed domain of WGS84 coordinate system.")
try:
# Extract CF-compliant CRS_WKT from crs variable.
raster_crs = CRS.from_cf(ds_copy.crs.attrs)
except AttributeError as e:
# This is guessing that lons are wrapped around at 180+ but without much information, this might not be true
if np.min(lon) >= -180 and np.max(lon) <= 180:
raster_crs = wgs84
elif np.min(lon) >= 0 and np.max(lon) <= 360:
wrap_lons = True
raster_crs = wgs84_wrapped
else:
raise CRSError(
"Raster CRS is not known and does not resemble WGS84."
) from e
_check_crs_compatibility(shape_crs=shape_crs, raster_crs=raster_crs)
mask_2d = create_mask(x_dim=lon, y_dim=lat, poly=poly, wrap_lons=wrap_lons).clip(
1, 1
)
# 1 on the shapes, NaN elsewhere.
# We simply want to remove the 0s from the zeroth shape, for our outer mask trick below.
if np.all(mask_2d.isnull()):
raise ValueError(
f"No grid cell centroids found within provided polygon bounds ({poly.bounds}). "
'Try using the "buffer" option to create an expanded areas or verify polygon.'
)
sp_dims = set(mask_2d.dims) # Spatial dimensions
# Find the outer mask. When subsetting unconnected shapes,
# we dont want to drop the inner NaN regions, it may cause problems downstream.
inner_mask = xarray.full_like(mask_2d, True, dtype=bool)
for dim in sp_dims:
# For each dimension, propagate shape indexes in either directions
# Then sum on the other dimension. You get a step function going from 0 to X.
# The non-zero part that left and right have in common is the "inner" zone.
left = mask_2d.bfill(dim).sum(sp_dims - {dim})
right = mask_2d.ffill(dim).sum(sp_dims - {dim})
# True in the inner zone, False in the outer
inner_mask = inner_mask & (left != 0) & (right != 0)
# inner_mask including the shapes
inner_mask = mask_2d.notnull() | inner_mask
# loop through variables
for v in ds_copy.data_vars:
if set.issubset(sp_dims, set(ds_copy[v].dims)):
# 1st mask values outside shape, then drop values outside inner_mask
ds_copy[v] = ds_copy[v].where(mask_2d.notnull())
# Remove grid points outside the inner mask
# Then extract the coords.
# Using a where(inner_mask) on ds_copy triggers warnings with dask, sel seems safer.
mask_2d = mask_2d.where(inner_mask, drop=True)
for dim in sp_dims:
ds_copy = ds_copy.sel({dim: mask_2d[dim]})
# Add a CRS definition using CF conventions and as a global attribute in CRS_WKT for reference purposes
ds_copy.attrs["crs"] = raster_crs.to_string()
ds_copy["crs"] = 1
ds_copy["crs"].attrs.update(raster_crs.to_cf())
for v in ds_copy.variables:
if {lat.name, lon.name}.issubset(set(ds_copy[v].dims)):
ds_copy[v].attrs["grid_mapping"] = "crs"
if isinstance(ds, xarray.DataArray):
return ds._from_temp_dataset(ds_copy)
return ds_copy | 2d751cd4a9300645cb9bc7b1b353dc29da388f96 | 3,650,692 |
import matplotlib.pyplot as plt
def plot_record_static(
record,
save=True,
scale=1000,
select_kw={},
x_prop='wavenumber',
**kwargs
):
"""Figure of Static data from a record.
High level function.
record: Record to get data from
save: Boolean, Save figure
scale: Scale y axis.
select_kw: dict passed to select method
Returns
fig and ax.
"""
fig, ax = plt.subplots(num='{}_static'.format(record.name))
fig.clf()
select_kw.setdefault('delay_mean', True)
select_kw.setdefault('frame_med', True)
select_kw.setdefault('prop', 'unpumped')
data = record.select(**select_kw)
plot_spec(record.select(x_prop), scale*data, **kwargs)
plt.title("{}".format(record.lname))
fname = 'figures/{}_static.pdf'.format(record.name)
print(fname)
if save:
plt.savefig(fname)
print("saved")
return fig, ax | 4a25068f7df9450870af81fb2507f6262db61b42 | 3,650,693 |
import librosa
import numpy as np
def logmelspectrogram(wave: np.ndarray, conf: ConfMelspec) -> np.ndarray:
"""Convert a waveform to a scaled mel-frequency log-amplitude spectrogram.
Args:
wave::ndarray[Time,] - waveform
conf - Configuration
Returns::(Time, Mel_freq) - mel-frequency log(Bel)-amplitude spectrogram
"""
# mel-frequency linear-amplitude spectrogram :: [Freq=n_mels, T_mel]
mel_freq_amp_spec = librosa.feature.melspectrogram(
y=wave,
sr=conf.sampling_rate,
n_fft=conf.n_fft,
hop_length=conf.hop_length,
n_mels=conf.n_mels,
fmin=conf.fmin,
fmax=conf.fmax,
# norm=,
power=1,
pad_mode="reflect",
)
# [-inf, `min_db`, `ref_db`, +inf] dB(ref=1,power) => [`min_db_rel`/20, `min_db_rel`/20, 0, +inf]
min_db = conf.ref_db + conf.min_db_rel
ref, amin = db_to_linear(conf.ref_db), db_to_linear(min_db)
# `power_to_db` hack for linear-amplitude spec to log-amplitude spec conversion
mel_freq_log_amp_spec = librosa.power_to_db(mel_freq_amp_spec, ref=ref, amin=amin, top_db=None)
mel_freq_log_amp_spec_bel = mel_freq_log_amp_spec/10.
mel_freq_log_amp_spec_bel = mel_freq_log_amp_spec_bel.T
return mel_freq_log_amp_spec_bel | d4849092495b097b8efb292826eb020c8775157c | 3,650,694 |
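ConfMelspec and db_to_linear are defined elsewhere in the project; below is a minimal stand-in dataclass and helper (the field values and the dB convention are assumptions) plus a call on synthetic audio.

from dataclasses import dataclass
from typing import Optional
import numpy as np

@dataclass
class ConfMelspec:
    sampling_rate: int = 16000
    n_fft: int = 1024
    hop_length: int = 256
    n_mels: int = 80
    fmin: float = 0.0
    fmax: Optional[float] = 8000.0
    ref_db: float = 20.0
    min_db_rel: float = -80.0

def db_to_linear(db):
    # Assumed inverse of the 10*log10 power-dB convention used above.
    return 10.0 ** (db / 10.0)

wave = np.random.default_rng(0).normal(0.0, 0.1, 16000).astype(np.float32)
mel = logmelspectrogram(wave, ConfMelspec())
print(mel.shape)   # -> (n_frames, 80)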
def get_trainer_config(env_config, train_policies, num_workers=9, framework="tf2"):
"""Build configuration for 1 run."""
# trainer config
config = {
"env": env_name, "env_config": env_config, "num_workers": num_workers,
# "multiagent": {"policy_mapping_fn": lambda x: x, "policies": policies,
# "policies_to_train": train_policies},
"framework": framework,
"train_batch_size": 512,
'batch_mode': 'truncate_episodes',
"callbacks": TraceMallocCallback,
"lr": 0.0,
"num_gpus": 1,
}
return config | 4452d0e037b4bc49a5b027d4f0f6dd2993eceac2 | 3,650,695 |
def SamAng(Tth,Gangls,Sangl,IFCoup):
"""Compute sample orientation angles vs laboratory coord. system
:param Tth: Signed theta
:param Gangls: Sample goniometer angles phi,chi,omega,azmuth
:param Sangl: Sample angle zeros om-0, chi-0, phi-0
:param IFCoup: True if omega & 2-theta coupled in CW scan
:returns:
psi,gam: Sample odf angles
dPSdA,dGMdA: Angle zero derivatives
"""
if IFCoup:
GSomeg = sind(Gangls[2]+Tth)
GComeg = cosd(Gangls[2]+Tth)
else:
GSomeg = sind(Gangls[2])
GComeg = cosd(Gangls[2])
GSTth = sind(Tth)
GCTth = cosd(Tth)
GSazm = sind(Gangls[3])
GCazm = cosd(Gangls[3])
GSchi = sind(Gangls[1])
GCchi = cosd(Gangls[1])
GSphi = sind(Gangls[0]+Sangl[2])
GCphi = cosd(Gangls[0]+Sangl[2])
SSomeg = sind(Sangl[0])
SComeg = cosd(Sangl[0])
SSchi = sind(Sangl[1])
SCchi = cosd(Sangl[1])
AT = -GSTth*GComeg+GCTth*GCazm*GSomeg
BT = GSTth*GSomeg+GCTth*GCazm*GComeg
CT = -GCTth*GSazm*GSchi
DT = -GCTth*GSazm*GCchi
BC1 = -AT*GSphi+(CT+BT*GCchi)*GCphi
BC2 = DT-BT*GSchi
BC3 = AT*GCphi+(CT+BT*GCchi)*GSphi
BC = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg
psi = acosd(BC)
BD = 1.0-BC**2
C = np.where(BD>1.e-6,rpd/np.sqrt(BD),0.)
dPSdA = [-C*(-BC1*SSomeg*SCchi-BC2*SSomeg*SSchi-BC3*SComeg),
-C*(-BC1*SComeg*SSchi+BC2*SComeg*SCchi),
-C*(-BC1*SSomeg-BC3*SComeg*SCchi)]
BA = -BC1*SSchi+BC2*SCchi
BB = BC1*SSomeg*SCchi+BC2*SSomeg*SSchi+BC3*SComeg
gam = atan2d(BB,BA)
BD = (BA**2+BB**2)/rpd
dBAdO = 0
dBAdC = -BC1*SCchi-BC2*SSchi
dBAdF = BC3*SSchi
dBBdO = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg
dBBdC = -BC1*SSomeg*SSchi+BC2*SSomeg*SCchi
dBBdF = BC1*SComeg-BC3*SSomeg*SCchi
dGMdA = np.where(BD > 1.e-6,[(BA*dBBdO-BB*dBAdO)/BD,(BA*dBBdC-BB*dBAdC)/BD, \
(BA*dBBdF-BB*dBAdF)/BD],[np.zeros_like(BD),np.zeros_like(BD),np.zeros_like(BD)])
return psi,gam,dPSdA,dGMdA | 7b7efb995e9a3a2c659e5392c5c2b5b836b2750f | 3,650,696 |
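sind, cosd, acosd, atan2d and rpd are module-level helpers not shown here; plausible numpy-based definitions (an assumption inferred from how they are used) are:

import numpy as np

# Degree-based trigonometry helpers assumed by SamAng.
sind = lambda x: np.sin(np.radians(x))
cosd = lambda x: np.cos(np.radians(x))
acosd = lambda x: np.degrees(np.arccos(x))
atan2d = lambda y, x: np.degrees(np.arctan2(y, x))
rpd = np.pi / 180.0   # radians per degree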
def debye_C_V(T,thetaD,natoms):
"""
Returns the heat capacity at constant volume, C_V, of the Debeye model at a
given temperature, T, in meV/atom/K.
"""
C_V = 4*debye_func(thetaD/T)-3*(thetaD/T)/(sp.exp(thetaD/T)-1.)
C_V = 3*natoms*BOLTZCONST*C_V
return C_V | 08e708f9c9ce16ed5c54e6f1bc920dd4be81e31e | 3,650,697 |
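debye_func, BOLTZCONST and the sp alias are defined elsewhere (sp is presumably numpy or scipy). A hedged sketch of the third-order Debye function via numerical quadrature, with Boltzmann's constant in meV/K to match the docstring's units:

import numpy as np
from scipy.integrate import quad

BOLTZCONST = 8.617333262e-2   # Boltzmann constant in meV/K (assumed unit convention)

def debye_func(x):
    """Third-order Debye function D_3(x) = (3/x**3) * integral_0^x t**3/(exp(t)-1) dt (sketch)."""
    integral, _ = quad(lambda t: t**3 / np.expm1(t), 0.0, x)
    return 3.0 * integral / x**3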
def setup(clip=True, flip=True):
"""
Project specific data import and setup function
:param clip: bool - use clipping
:param flip: bool - use flipping
:return: data as pandas dataframe, List[LAICPMSData obj]
"""
# calibration
# Zn: y = 0.0395 kcps/(µg/g)* x + 1.308 kcps
# use inverse calibration function to get conc from counts; transformation m = 1/m and b = -1 * b/m
calibration_functions = {
'Zn:64': lambda x: 1/0.0395 * x - 1.308/0.0395,
}
# data files
filenames = ["../data/LA_Data_C1SA1.csv",
"../data/LA_Data_C2SA1.csv",
"../data/LA_Data_C3SA1.csv",
"../data/LA_Data_C4SA1.csv",
"../data/LA_Data_C1SB1.csv",
"../data/LA_Data_C2SB1.csv",
"../data/LA_Data_C3SB1.csv",
"../data/LA_Data_C4SB1.csv",
"../data/LA_Data_C1SC1.csv",
"../data/LA_Data_C2SC1.csv",
"../data/LA_Data_C3SC1.csv",
"../data/LA_Data_C4SC1.csv"]
# short sample names
smpl_names = ["A_1",
"A_2",
"A_3",
"A_4",
"B_1",
"B_2",
"B_3",
"B_4",
"C_1",
"C_2",
"C_3",
"C_4"]
# list on how to flip the data to get matching orientations, h = horizontally, v = vertically
if flip:
flip_list = [
'h',
'v',
'h',
'h',
'h',
'h',
'v',
'v',
'v',
'h',
'h',
'h'
]
else:
flip_list = ['no' for i in range(0, len(filenames))]
# clip data to windows of defined size
# main reason is comparability & tissue folds
if clip:
#100 px x 150 px
clip_list = [
(70,170,30,180),
(70,170,30,180),
(50,150,30,180),
(60,160,50,200),
(30,130,30,180),
(40,140,30,180),
(40,140,30,180),
(40,140,30,180),
(60,160,20,170),
(60,160,20,170),
(60,160,20,170),
(60,160,20,170),
]
else:
clip_list = [None for i in range(0, len(filenames))]
ms_data = []
data = []
# here the data gets processed into LAICPMSData objects - one per file
# data contains all Zn:64 data - masked/segmented based on P:31 content
for smpl, filename, clip, flip in zip(smpl_names, filenames, clip_list, flip_list):
curr_ms_data = LAICPMSData(filename=filename, clip_data_around_center=clip, flip=flip, pixel_dimensions=(15,15))
# only assign directly if you know what you are doing!
curr_ms_data._calibration_functions = calibration_functions
ms_data.append(curr_ms_data)
data.append(curr_ms_data.get_masked_data(element_list=['Zn:64'], discriminator='P:31', only_on_tissue=True))
data[-1]['sample'] = [smpl for i in range(0, len(data[-1]))]
return pd.concat(data, ignore_index=True), ms_data | 2f62f124512daff76b59e3e93a2b50de6d5ca0be | 3,650,698 |
from numpy import array, sqrt
from numpy.linalg import eig
# Qobj and vec2mat are assumed to come from qutip (e.g. `from qutip import Qobj, vec2mat`).
def choi_to_kraus(q_oper):
"""
Takes a Choi matrix and returns a list of Kraus operators.
TODO: Create a new class structure for quantum channels, perhaps as a
strict sub-class of Qobj.
"""
vals, vecs = eig(q_oper.data.todense())
vecs = list(map(array, zip(*vecs)))
return list(map(lambda x: Qobj(inpt=x),
[sqrt(vals[j]) * vec2mat(vecs[j])
for j in range(len(vals))])) | 850465f98442ce8c8fa329a293a9b519e4a7202e | 3,650,699 |