content | sha1 | id
---|---|---|
def irrf(valor=0):
"""
-> Function to calculate the IRRF (Brazilian withholding income tax) amount.
:param valor: Base salary amount used to compute the IRRF.
:return: Returns the IRRF amount and the tax rate (aliquota) applied.
"""
irrf = []
if valor < 1903.99:
irrf.append(0)
irrf.append(0)
elif valor >= 1903.99 and valor <= 2826.65:
irrf.append((valor * 7.5) / 100 - 142.80) # 7.5% rate, minus the deduction amount.
irrf.append('7,5')
elif valor >= 2826.66 and valor <= 3751.05:
irrf.append((valor * 15) / 100 - 354.80) # 15% rate, minus the deduction amount.
irrf.append('15')
elif valor >= 3751.06 and valor <= 4664.68:
irrf.append((valor * 22.5) / 100 - 636.13) # 22.5% rate, minus the deduction amount.
irrf.append('22,5')
elif valor > 4664.68:
irrf.append((valor * 27.5) / 100 - 869.36) # 27.5% rate, minus the deduction amount.
irrf.append('27,5')
return irrf | 53646b770b2c2359e1e8c4f725b27396cc972050 | 3,655,700 |
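# Usage sketch (not part of the original snippet): the salaries below are
# hypothetical values chosen only to exercise each IRRF bracket.
for salario in (1500.00, 2500.00, 3000.00, 4000.00, 5000.00):
    imposto, aliquota = irrf(salario)
    print('Salary {:.2f}: IRRF {:.2f} (rate {}%)'.format(salario, imposto, aliquota))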
def find_adcp_files_within_period(working_directory,max_gap=20.0,max_group_size=6):
"""
Sorts a directory of ADCPRdiWorkhorseData raw files into groups by
closeness in time, with groups being separated by more than
'max_gap' minutes. This method first sorts the files by start time, and
then splits the observations where they are more than
'max_gap' minutes apart.
Inputs:
working_directory = directory path containing ADCP raw or netcdf files
max_gap = maximum time allowed between ADCP observations when grouping (minutes)
max_group_size = maximum number of ADCPData objects per group
Returns:
List of lists that contain groups of input ADCPData objects
"""
if os.path.exists(working_directory):
data_files = glob.glob(os.path.join(working_directory,'*[rR].000'))
data_files.extend(glob.glob(os.path.join(working_directory,'*.nc')))
else:
print "Path (%s) not found - exiting."%working_directory
exit()
start_times = list()
for data_file in data_files:
try:
a = adcpy.open_adcp(data_file,
file_type="ADCPRdiWorkhorseData",
num_av=1)
start_times.append(a.mtime[0])
except Exception:
start_times.append(None)
if start_times:
gaps, nn, nnan = find_start_time_gaps(start_times)
data_files_sorted = [ data_files[i] for i in nn ]
# keep only entries whose start time could be read (nnan is a boolean mask)
nnan_i = [i for i, ok in enumerate(nnan) if ok]
data_files_sorted = [ data_files_sorted[i] for i in nnan_i ]
return group_according_to_gap(data_files_sorted,gaps,max_gap,max_group_size) | 6e3afc4dd8532c579870541fe42519078e86f935 | 3,655,701 |
def regular_transport_factory(host, port, env, config_file):
"""
Basic unencrypted Thrift transport factory function.
Returns instantiated Thrift transport for use with cql.Connection.
Params:
* host .........: hostname of Cassandra node.
* port .........: port number to connect to.
* env ..........: environment variables (os.environ) - not used by this implementation.
* config_file ..: path to cqlsh config file - not used by this implementation.
"""
tsocket = TSocket.TSocket(host, port)
return TTransport.TFramedTransport(tsocket) | bccee131d61a9a251a63ee021e0ab0c5b6033c44 | 3,655,702 |
import jax.numpy as jnp
def smoothed_abs(x, eps=1e-8):
"""A smoothed version of |x| with improved numerical stability."""
return jnp.sqrt(jnp.multiply(x, x) + eps) | f0b63e9482e602b29b85ce3f0d602d9918557ada | 3,655,703 |
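# Usage sketch (not part of the original snippet): unlike jnp.abs, the smoothed
# version has a well-defined gradient at zero.
import jax
print(smoothed_abs(jnp.array(0.0)))   # sqrt(eps) = 1e-4
print(jax.grad(smoothed_abs)(0.0))    # 0.0, since d/dx sqrt(x^2 + eps) = x / sqrt(x^2 + eps)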
def increment(t1, seconds):
"""Adds seconds to a Time object."""
assert valid_time(t1)
seconds += time_to_int(t1)
return int_to_time(seconds) | f7807fc12a9ed9350d13d0f8c4c707c79165e9d5 | 3,655,704 |
import os
def add_absname(file):
"""Prefix a file name with the working directory."""
work_dir = os.path.dirname(__file__)
return os.path.join(work_dir, file) | 34d78ff980cbe16ace897cf164563badc9d36d2a | 3,655,705 |
def dense(input_shape, output_shape, output_activation='linear', name=None):
"""
Build a simple Dense model
Parameters
----------
input_shape: shape
Input shape
output_shape: int
Number of actions (Discrete only so far)
output_activation: str
Activation function of the output layer (default 'linear')
name: str, optional
Name given to the Keras model
Returns
-------
model: Model
Keras tf model
"""
# Create inputs
inputs = Input(shape=input_shape)
x = Flatten()(inputs)
# Create two hidden dense layers and one layer for the output
x = Dense(256, activation='tanh')(x)
x = Dense(256, activation='tanh')(x)
predictions = Dense(output_shape, activation=output_activation)(x)
# Finally build model
model = Model(inputs=inputs, outputs=predictions, name=name)
model.summary()
return model | 6f7ba28834ecfe7b5e74aa40ef30fcd9aa531836 | 3,655,706 |
def dataset_labels(alldata, tag=None):
""" Return label for axis of dataset
Args:
alldata (DataSet): dataset
tag (str): can be 'x', 'y' or 'z'
"""
if tag == 'x':
d = alldata.default_parameter_array()
return d.set_arrays[0].label
if tag == 'y':
d = alldata.default_parameter_array()
return d.set_arrays[1].label
if tag is None or tag == 'z':
d = alldata.default_parameter_array()
return d.label
return '?' | 4ccd3af38d3f18e9fbf43e98f8a898426c6c1440 | 3,655,707 |
from typing import Optional
from typing import Any
from typing import Callable
from typing import Tuple
from typing import List
from typing import Union
def spread(
template: Template,
data: Optional[Any],
flavor: Flavor,
postprocess: Optional[Callable] = None,
start_at: int = 0,
replace_missing_with: Optional[str] = None,
) -> Tuple[List["pygsheets.Cell"], int]:
"""Spread data into cells.
Parameters
----------
template
A list of expressions which determines how the cells are laid out.
data
Data to render. Can be a dictionary, a dataclass, a list; just as long as the template
expressions can be applied to the data.
flavor
Determines what kind of cells to generate.
postprocess
An optional function to call for each cell once it has been created.
start_at
The row number where the layout begins. Zero-based.
replace_missing_with
An optional value to be used when a variable isn't found in the data. An exception is
raised if a variable is not found and this is not specified.
Returns
-------
cells
The list of cells.
n_rows
The number of rows which the cells span over.
"""
data = data or {}
# Unpack the template
table = []
for c, col in enumerate(template):
cells = []
if callable(col):
col = col(data)
for r, expr in enumerate(col if isinstance(col, list) else [col]):
if callable(expr):
expr = expr(data)
# expr can be:
# - expr
# - (expr, postprocessor)
# - (expr, postprocessor, note)
pp = None
note = None
if isinstance(expr, tuple):
if len(expr) == 2:
expr, pp = expr
else:
expr, pp, note, *_ = expr
cell = _Cell(
r=r + start_at,
c=c,
expr=_normalize_expression(expr),
note=note,
postprocess=pp,
)
cells.append(cell)
table.append(cells)
# We're going to add the positions of the named variables to the data
named_variables = {}
cell_names = {}
for c, col in enumerate(table):
for r, cell in enumerate(col):
if _is_named_formula(cell.expr):
name = cell.expr.split(" = ")[0]
named_variables[name] = cell.address
cell_names[len(cell_names)] = name
elif _is_variable(cell.expr):
cell_names[len(cell_names)] = cell.expr[1:]
else:
cell_names[len(cell_names)] = None
if flavor == Flavor.PYGSHEETS.value:
cells = [
cell.as_pygsheets(
data=data,
named_variables=named_variables,
replace_missing_with=replace_missing_with,
)
for col in table
for cell in col
]
else:
raise ValueError(
f"Unknown flavor {flavor}. Available options: {', '.join(f.value for f in Flavor)}"
)
if postprocess:
for i, cell in enumerate(cells):
cells[i] = postprocess(cell, cell_names[i])
n_rows = max(map(len, table))
return cells, n_rows | db354b3d190f1bff5b78c29a3ff6b4021287b27f | 3,655,708 |
import numpy as np
from sklearn.model_selection import train_test_split
def train_validate_test_split(DataFrame, ratios=(0.6,0.2,0.2)):
"""
Parameters
----------
DataFrame : pandas.DataFrame
DataFrame
ratios : tuple
E.g.
(train, validate, test) = (0.6, 0.25, 0.15)
(train, test) = (0.6, 0.4) -> validate = test
Returns
-------
TrainDataset : pandas.DataFrame
ValidateDataset : pandas.DataFrame
TestDataset : pandas.DataFrame
"""
N = len(DataFrame.index)
if len(ratios)==3:
train_size = ratios[0]/np.sum(ratios)
test_size = ratios[2]/np.sum(ratios[1:3])
TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42)
ValidateDataset, TestDataset = train_test_split(TestDataset, test_size=test_size, random_state=42)
elif len(ratios)==2:
train_size = ratios[0]/np.sum(ratios)
TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42)
ValidateDataset = TestDataset
print('Validate = Test')
else:
print('ERROR in splitting train, validate, test')
return None, None, None
n_train = len(TrainDataset.index)
n_validate = len(ValidateDataset.index)
n_test = len(TestDataset.index)
print('Train Samples: {} [{:.1f}%]'.format(n_train, n_train/N*100))
print('Validate Samples: {} [{:.1f}%]'.format(n_validate, n_validate/N*100))
print('Test Samples: {} [{:.1f}%]'.format(n_test, n_test/N*100))
return TrainDataset, ValidateDataset, TestDataset | 3d4b8424f66e72d3dd28328afb6465768b1778cb | 3,655,709 |
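# Usage sketch (not part of the original snippet): split a small DataFrame
# 60/20/20; the column name 'x' is arbitrary.
import pandas as pd
df = pd.DataFrame({'x': range(100)})
train, validate, test = train_validate_test_split(df, ratios=(0.6, 0.2, 0.2))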
from typing import Union
def is_error(code: Union[Error, int]) -> bool:
"""Returns True, if error is a (fatal) error, not just a warning."""
if isinstance(code, Error): code = code.code
return code >= ERROR | 347bde61feb36ce70bf879d713ff9feb41e67085 | 3,655,710 |
def unpack_triple(item):
"""Extracts the indices and values from an object.
The argument item can either be an instance of SparseTriple or a
sequence of length three.
Example usage:
>>> st = SparseTriple()
>>> ind1, ind2, val = unpack_triple(st)
>>> quad_expr = [[], [], []]
>>> ind1, ind2, val = unpack_triple(quad_expr)
"""
try:
assert item.isvalid()
ind1, ind2, val = item.unpack()
except AttributeError:
ind1, ind2, val = item[0:3]
validate_arg_lengths([ind1, ind2, val])
return ind1, ind2, val | bae536d313140952927875640f925876700bf981 | 3,655,711 |
def max_sequence(arr):
"""
The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or
list of integers.
:param arr: an array or list of integers.
:return: the maximum value found within the subarray.
"""
best = 0
for x in range(len(arr)):
for y in range(x, len(arr)):
current = sum(arr[x:y+1])
if current > best:
best = current
return best | 3ae6dafb4879476ba6e15610645f26299a4c6719 | 3,655,712 |
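# Alternative sketch (not part of the original snippet): Kadane's algorithm
# computes the same result in O(n) instead of the quadratic/cubic loops above.
def max_sequence_kadane(arr):
    best = current = 0
    for value in arr:
        current = max(0, current + value)
        best = max(best, current)
    return best

assert max_sequence_kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6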
def get_by_username(username):
"""
Retrieve a user from the database by their username
:param username:
:return:
"""
return database.get(User, username, field="username") | 354d323c464cbdbaf72b88284b2305657d03a027 | 3,655,713 |
def evalPoint(u, v):
"""
Evaluates the surface point corresponding to normalized parameters (u, v)
"""
a, b, c, d = 0.5, 0.3, 0.5, 0.1
s = TWO_PI * u
t = (TWO_PI * (1 - v)) * 2
r = a + b * cos(1.5 * t)
x = r * cos(t)
y = r * sin(t)
z = c * sin(1.5 * t)
dv = PVector()
dv.x = (-1.5 * b * sin(1.5 * t) * cos(t) -
(a + b * cos(1.5 * t)) * sin(t))
dv.y = (-1.5 * b * sin(1.5 * t) * sin(t) +
(a + b * cos(1.5 * t)) * cos(t))
dv.z = 1.5 * c * cos(1.5 * t)
q = dv
q.normalize()
qvn = PVector(q.y, -q.x, 0)
qvn.normalize()
ww = q.cross(qvn)
pt = PVector()
pt.x = x + d * (qvn.x * cos(s) + ww.x * sin(s))
pt.y = y + d * (qvn.y * cos(s) + ww.y * sin(s))
pt.z = z + d * ww.z * sin(s)
return pt | a3598739dc28e9fcd47539e4a51b00c351eb4e3d | 3,655,714 |
def decode_funcname2(subprogram_die, address):
""" Get the function name from an PC address"""
for DIE in subprogram_die:
try:
lowpc = DIE.attributes['DW_AT_low_pc'].value
# DWARF v4 in section 2.17 describes how to interpret the
# DW_AT_high_pc attribute based on the class of its form.
# For class 'address' it's taken as an absolute address
# (similarly to DW_AT_low_pc); for class 'constant', it's
# an offset from DW_AT_low_pc.
highpc_attr = DIE.attributes['DW_AT_high_pc']
highpc_attr_class = describe_form_class(highpc_attr.form)
if highpc_attr_class == 'address':
highpc = highpc_attr.value
elif highpc_attr_class == 'constant':
highpc = lowpc + highpc_attr.value
else:
print('Error: invalid DW_AT_high_pc class:',
highpc_attr_class)
continue
if lowpc <= address < highpc:
return DIE.attributes['DW_AT_name'].value
except KeyError:
continue
return None | b322282b9f908311dedbd73ade3d31bbb86cebe8 | 3,655,715 |
def get_reddit_slug(permalink):
"""
Get the reddit slug from a submission permalink, with '_' replaced by '-'
Args:
permalink (str): reddit submission permalink
Returns:
str: the reddit slug for a submission
"""
return list(filter(None, permalink.split("/")))[-1].replace("_", "-") | 587239a0b7bbd88e10d49985dd6ebfd3768038d8 | 3,655,716 |
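# Usage sketch (not part of the original snippet); the permalink is a made-up example.
slug = get_reddit_slug("/r/learnpython/comments/abc123/how_to_parse_permalinks/")
print(slug)  # -> "how-to-parse-permalinks"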
def newton_halley(func, x0, fprime, fprime2, args=(), tol=1.48e-8,
maxiter=50, disp=True):
"""
Find a zero from Halley's method using the jitted version of
Scipy's.
`func`, `fprime`, `fprime2` must be jitted via Numba.
Parameters
----------
func : callable and jitted
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : callable and jitted
The derivative of the function (when available and convenient).
fprime2 : callable and jitted
The second order derivative of the function
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
disp : bool, optional
If True, raise a RuntimeError if the algorithm didn't converge
Returns
-------
results : namedtuple
root - Estimated location where function is zero.
function_calls - Number of times the function was called.
iterations - Number of iterations needed to find the root.
converged - True if the routine converged
"""
if tol <= 0:
raise ValueError("tol is too small <= 0")
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
# Convert to float (don't use float(x0); this works also for complex x0)
p0 = 1.0 * x0
funcalls = 0
status = _ECONVERR
# Halley Method
for itr in range(maxiter):
# first evaluate fval
fval = func(p0, *args)
funcalls += 1
# If fval is 0, a root has been found, then terminate
if fval == 0:
status = _ECONVERGED
p = p0
itr -= 1
break
fder = fprime(p0, *args)
funcalls += 1
# derivative is zero, not converged
if fder == 0:
p = p0
break
newton_step = fval / fder
# Halley's variant
fder2 = fprime2(p0, *args)
p = p0 - newton_step / (1.0 - 0.5 * newton_step * fder2 / fder)
if abs(p - p0) < tol:
status = _ECONVERGED
break
p0 = p
if disp and status == _ECONVERR:
msg = "Failed to converge"
raise RuntimeError(msg)
return _results((p, funcalls, itr + 1, status)) | 96531b47a399ee0d897e5feadaa93eb56bee2b52 | 3,655,717 |
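# Usage sketch (not part of the original snippet): the module-level objects
# (_ECONVERR, _ECONVERGED, _results) are assumed to exist as in the code above,
# and the callables are jitted with Numba as the docstring requires.
from numba import njit

@njit
def f(x):
    return x * x - 2.0

@njit
def fprime(x):
    return 2.0 * x

@njit
def fprime2(x):
    return 2.0

# res = newton_halley(f, 1.0, fprime, fprime2)
# res.root should be close to sqrt(2) ~= 1.41421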
def staff_dash(request):
"""Route for displaying the staff dashboard of the site.
"""
# Empty context to populate:
context = {}
def get_account_name(path):
"""Method contains logic to extract the app name from a url path.
Method uses the django.urls.resolve method with basic string splitting.
"""
try:
appname = resolve(path).func.__module__.split(".")[1]
except Exception:
appname = None
return appname
# Ensuring that the user is a staff member if not redirect home:
if request.user.is_staff is False:
return redirect("user_account_dashboard")
else:
# Determining a one month window for queying request data:
prev_month = date.today() - timedelta(days=30)
# Querying all of the requests made to the database in the last month:
max_queryset = Request.objects.filter(time__gt=prev_month)
# QuerySet to Dataframe Conversions:
requests_timeseries = max_queryset.values_list("time", "response", "method", "path", "user")
timeframe_df = pd.DataFrame.from_records(requests_timeseries, columns=["time", "response", "method", "path", "user"])
# Adding columns:
timeframe_df["_count"] = 1
timeframe_df['app'] = timeframe_df["path"].apply(lambda x: get_account_name(x))
timeframe_df.set_index(timeframe_df['time'], inplace=True)
# Resampling/Transforming data:
daily_resample_get = timeframe_df.loc[timeframe_df['method'] == 'GET', "_count"].squeeze().resample('H').sum()
daily_resample_posts = timeframe_df.loc[timeframe_df['method'] != 'GET', "_count"].squeeze().resample('H').sum()
# Extracting Series for all response codes:
daily_200_response = timeframe_df.loc[timeframe_df["response"] < 300, "_count"]
daily_300_response = timeframe_df.loc[
(timeframe_df["response"] >= 300) & (timeframe_df["response"] < 400), "_count"]
daily_400_response = timeframe_df.loc[
(timeframe_df["response"] >= 400) & (timeframe_df["response"] < 500), "_count"]
daily_500_response = timeframe_df.loc[timeframe_df["response"] >= 500, "_count"]
# Building a dict of unique get/post timeseries based on unique apps:
app_timeseries_dict = {}
# Getting relevant list of installed apps:
third_party_apps = [app.split(".")[0] for app in settings.INSTALLED_APPS
if not app.startswith("django.") and
app not in ['rest_framework', 'rest_framework.authtoken', 'rest_auth', 'request']
]
for app in third_party_apps:
# Nested dict structure for GET and POST request storage:
application_dict = {}
# Populating application dict w/ GET and POST request timeseries:
try:
app_timeseries_get = timeframe_df.loc[
(timeframe_df["app"] == app) & (timeframe_df["method"] == "GET"), "_count"].resample("H").sum()
application_dict["GET"] = {
"Data" : app_timeseries_get.values.tolist(),
"Index": app_timeseries_get.index.tolist()
}
except Exception:
application_dict["GET"] = {
"Data": [0] * len(daily_resample_get.index),
"Index": daily_resample_get.index.tolist()
}
try:
app_timeseries_post = timeframe_df.loc[
(timeframe_df["app"] == app) & (timeframe_df["method"] == "POST"), "_count"].resample("H").sum()
application_dict["POST"] = {
"Data": app_timeseries_post.values.tolist(),
"Index": app_timeseries_post.index.tolist()
}
except Exception:
application_dict["POST"] = {
"Data": [0] * len(daily_resample_get.index),
"Index": daily_resample_get.index.tolist()
}
# Fully Building nested dict:
app_timeseries_dict[app] = application_dict
print(len(application_dict["GET"]["Data"]), len(application_dict["GET"]['Index']))
# Seralzing dataframe columns to pass to template:
context['get_datetime'] = daily_resample_get.index.tolist()
# Error-Catching daily response codes when resampling:
response_code_dict = {}
try:
response_code_dict[200] = daily_200_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[200] = [0] * len(daily_resample_get.index)
try:
response_code_dict[300] = daily_300_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[300] = [0] * len(daily_resample_get.index)
try:
response_code_dict[400] = daily_400_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[400] = [0] * len(daily_resample_get.index)
try:
response_code_dict[500] = daily_500_response.squeeze().resample("H").sum().values.tolist()
except Exception:
response_code_dict[500] = [0] * len(daily_resample_get.index)
# Populating Context:
context['app_timeseries'] = app_timeseries_dict
context['get_requests_count'] = daily_resample_get.values.tolist()
context['post_requests_count'] = daily_resample_posts.values.tolist()
context['response_codes'] = response_code_dict
return render(request, "accounts/staff_dashboard.html", context) | 83d1d3027b64349dba5560934ba9d7bdb3536c91 | 3,655,718 |
import csv
import numpy as np
def read_v1_file(path: str = "CBETHUSD.csv") -> tuple:
"""
Read the data from the file path, reconstruct the format the the data
and return a 3d matrix.
"""
lst = []
res = []
with open(path) as data:
reader = csv.reader(data)
next(reader) # skip the header row
for row in reader:
lst.append(float(row[1]))
lst_con = []
for i in range(len(lst) - 30):
temp = lst[i:i + 25]
lst_con.append(temp)
res_temp = lst[i + 30] - temp[-1]
res_cat = [0, 0, 0]
if abs(res_temp) < abs(temp[-1] * 0.05):
res_cat[1] = 1
elif res_temp < 0:
res_cat[0] = 1
else:
res_cat[2] = 1
res.append(res_cat)
np_lst = np.array(lst_con).reshape(len(lst_con), 25, 1)
np_res = np.array(res)
return (np_lst, np_res) | 6fd80fda5f327464e63f34df1f16b923349bc7a4 | 3,655,719 |
import torch
def get_adjacent_th(spec: torch.Tensor, filter_length: int = 5) -> torch.Tensor:
"""Zero-pad and unfold stft, i.e.,
add zeros to the beginning so that, using the multi-frame signal model,
there will be as many output frames as input frames.
Args:
spec (torch.Tensor): input spectrum (B, F, T, 2)
filter_length (int): length for frame extension
Returns:
ret (torch.Tensor): output spectrum (B, F, T, filter_length, 2)
""" # noqa: D400
return (
torch.nn.functional.pad(spec, pad=[0, 0, filter_length - 1, 0])
.unfold(dimension=-2, size=filter_length, step=1)
.transpose(-2, -1)
.contiguous()
) | 4009b41fd4e729e16c749f4893f61b61ca922215 | 3,655,720 |
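# Usage sketch (not part of the original snippet): verify the documented shapes.
spec = torch.randn(2, 257, 100, 2)           # (B, F, T, 2)
out = get_adjacent_th(spec, filter_length=5)
print(out.shape)                             # torch.Size([2, 257, 100, 5, 2])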
def K2(eps):
""" Radar dielectric factor |K|**2
Parameters
----------
eps : complex
nd array of complex relative dielectric constants
Returns
-------
nd - float
Radar dielectric factor |K|**2 real
"""
K_complex = (eps-1.0)/(eps+2.0)
return (K_complex*K_complex.conj()).real | 8754bee38a46de14d205764c4843cad7c4d5d88f | 3,655,721 |
import numpy as np
def permutation_test_mi(x, y, B=100, random_state=None, **kwargs):
"""Permutation test for mutual information
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
B : int
Number of permutations
random_state : int
Sets seed for random number generator
Returns
-------
p : float
Achieved significance level
"""
np.random.seed(random_state)
# Estimate correlation from original data
theta = mi(x, y)
# Permutations
y_ = y.copy()
theta_p = np.zeros(B)
for i in range(B):
np.random.shuffle(y_)
theta_p[i] = mi(x, y_)
# Achieved significance level
return np.mean(theta_p >= theta) | ea60f7ddf483f3a095971ab3c52a07e34ac863d5 | 3,655,722 |
def convert_time_units(value, value_unit="s", result_unit="s", case_sensitive=True):
"""
Convert `value` from `value_unit` to `result_unit`.
The possible time units are ``'s'``,``'ms'``, ``'us'``, ``'ns'``, ``'ps'``, ``'fs'``, ``'as'``.
If ``case_sensitive==True``, matching units is case sensitive.
"""
if string_utils.string_equal(value_unit,"s",case_sensitive=case_sensitive):
value_s=value
elif string_utils.string_equal(value_unit,"ms",case_sensitive=case_sensitive):
value_s=value*1E-3
elif string_utils.string_equal(value_unit,"us",case_sensitive=case_sensitive):
value_s=value*1E-6
elif string_utils.string_equal(value_unit,"ns",case_sensitive=case_sensitive):
value_s=value*1E-9
elif string_utils.string_equal(value_unit,"ps",case_sensitive=case_sensitive):
value_s=value*1E-12
elif string_utils.string_equal(value_unit,"fs",case_sensitive=case_sensitive):
value_s=value*1E-15
elif string_utils.string_equal(value_unit,"as",case_sensitive=case_sensitive):
value_s=value*1E-18
else:
raise IOError("unrecognized length unit: {0}".format(value_unit))
if string_utils.string_equal(result_unit,"s",case_sensitive=case_sensitive):
return value_s
elif string_utils.string_equal(result_unit,"ms",case_sensitive=case_sensitive):
return value_s*1E3
elif string_utils.string_equal(result_unit,"us",case_sensitive=case_sensitive):
return value_s*1E6
elif string_utils.string_equal(result_unit,"ns",case_sensitive=case_sensitive):
return value_s*1E9
elif string_utils.string_equal(result_unit,"ps",case_sensitive=case_sensitive):
return value_s*1E12
elif string_utils.string_equal(result_unit,"fs",case_sensitive=case_sensitive):
return value_s*1E15
elif string_utils.string_equal(result_unit,"as",case_sensitive=case_sensitive):
return value_s*1E18
else:
raise IOError("unrecognized length unit: {0}".format(result_unit)) | dab3fdb88a5d137d45efe440a6075cd0339194ac | 3,655,723 |
import networkx as nx
from tqdm import tqdm
def compute_distribution_clusters(columns: list, dataset_name: str, threshold: float, pool: Pool,
chunk_size: int = None, quantiles: int = 256):
"""
Algorithm 2 of the paper "Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]. This
algorithm captures which columns contain data with similar distributions based on the EMD distance metric.
Parameters
---------
columns : list(str)
The columns of the database
dataset_name : str
Other name of the dataset
threshold : float
The conservative global EMD cutoff threshold described in [1]
pool: multiprocessing.Pool
The process pool that will be used in the pre-processing of the table's columns
chunk_size : int, optional
The number of chunks of each job process (default let the framework decide)
quantiles : int, optional
The number of quantiles that the histograms are split on (default is 256)
Returns
-------
list(list(str))
A list that contains the distribution clusters that contain the column names in the cluster
"""
combinations = list(column_combinations(columns, dataset_name, quantiles, intersection=False))
total = len(combinations)
if chunk_size is None:
chunk_size = int(calc_chunksize(pool._processes, total))
A: dict = transform_dict(dict(tqdm(pool.imap_unordered(process_emd, combinations, chunksize=chunk_size),
total=total)))
edges_per_column = list(pool.map(parallel_cutoff_threshold, list(cuttoff_column_generator(A, columns, dataset_name,
threshold))))
graph = create_graph(columns, edges_per_column)
connected_components = list(nx.connected_components(graph))
return connected_components | bdbdf233c02f6eced3504543c3adbd8ea12505f7 | 3,655,724 |
def get_eventframe_sequence(event_deque, is_x_first, is_x_flipped,
is_y_flipped, shape, data_format, frame_width,
frame_gen_method):
"""
Given a single sequence of x-y-ts events, generate a sequence of binary
event frames.
"""
inp = []
while len(event_deque) > 0:
inp.append(get_binary_frame(event_deque, is_x_first, is_x_flipped,
is_y_flipped, shape, data_format,
frame_width, frame_gen_method))
return np.stack(inp, -1) | 9d65bfa59c42b327cc7f5c02a044f545ec5f5a5e | 3,655,725 |
def creation_sequence_to_weights(creation_sequence):
"""
Returns a list of node weights which create the threshold
graph designated by the creation sequence. The weights
are scaled so that the threshold is 1.0. The order of the
nodes is the same as that in the creation sequence.
"""
# Turn input sequence into a labeled creation sequence
first = creation_sequence[0]
if isinstance(first, str): # creation sequence
if isinstance(creation_sequence, list):
wseq = creation_sequence[:]
else:
wseq = list(creation_sequence) # string like 'ddidid'
elif isinstance(first, tuple): # labeled creation sequence
wseq = [v[1] for v in creation_sequence]
elif isinstance(first, int): # compact creation sequence
wseq = uncompact(creation_sequence)
else:
raise TypeError("Not a valid creation sequence type")
# pass through twice--first backwards
wseq.reverse()
w = 0
prev = 'i'
for j, s in enumerate(wseq):
if s == 'i':
wseq[j] = w
prev = s
elif prev == 'i':
prev = s
w += 1
wseq.reverse() # now pass through forwards
for j, s in enumerate(wseq):
if s == 'd':
wseq[j] = w
prev = s
elif prev == 'd':
prev = s
w += 1
# Now scale weights
if prev == 'd':
w += 1
wscale = 1. / float(w)
return [ww * wscale for ww in wseq]
# return wseq | 80147c53ccb7f44fdca148cc422a0c149a5b7864 | 3,655,726 |
import jieba
def get_seg_features(string):
"""
Segment text with jieba
features are represented in BIES format
(B=1, I=2, E=3; S=0 denotes a single-character word)
"""
seg_feature = []
for word in jieba.cut(string):
if len(word) == 1:
seg_feature.append(0)
else:
tmp = [2] * len(word)
tmp[0] = 1
tmp[-1] = 3
seg_feature.extend(tmp)
return seg_feature | 505ba3064cacc2719e11126ce504b8c84abe10e9 | 3,655,727 |
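# Usage sketch (not part of the original snippet): the exact output depends on
# how jieba segments the input, but each word maps to 0 (single character) or
# to the pattern 1, 2, ..., 2, 3 (begin, inside, end).
print(get_seg_features(u"我爱北京天安门"))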
import PySpin
def print_device_info(nodemap):
"""
This function prints the device information of the camera from the transport
layer; please see NodeMapInfo example for more in-depth comments on printing
device information from the nodemap.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('\n*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print('%s: %s' % (node_feature.GetName(),
node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex.message)
return False
return result | 7f0affa8e8acaab48df8dc96c631ca9043f07482 | 3,655,728 |
import sys
import questionary
def make_withdrawal(account):
"""Withdrawal Dialog."""
# @TODO: Use questionary to capture the withdrawal and set it equal to amount variable. Be sure that amount is a floating
# point number.
amount = questionary.text("How much would you like to withdraw").ask()
amount = float(amount)
# Validate the withdrawal amount; exit with an error message if it is not positive.
if amount <= 0:
sys.exit("Withdrawal amount must be greater than 0. Please try again.")
# If the withdrawal amount is covered by the account balance, process it and return the account.
if amount <= account["balance"]:
account["balance"] = account["balance"] - amount
print("Your withdrawal was successful")
return account
else:
sys.exit(
"You do not have enough money in your account to make this withdrawal. Please try again."
)
# @TODO: Validates if withdrawal amount is less than or equal to account balance, processes withdrawal and returns account.
# Else system exits with error messages indicating that the account is short of funds. | c4d9da902da3b6b85950cd6faa1b6e582e0509fe | 3,655,729 |
from typing import Optional
def coerce_to_pendulum_date(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[Date]:
"""
Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
p = coerce_to_pendulum(x, assume_local=assume_local)
return None if p is None else p.date() | d2fb5d830736290eb9ddadf9fcd664d0cba88d4b | 3,655,730 |
import jax.numpy as jnp
def loss_fixed_depl_noquench(params, loss_data):
"""
MSE loss function for fitting individual stellar mass histories.
Only main sequence efficiency parameters. Quenching is deactivated.
Depletion time is fixed at tau=0Gyr, i.e. gas conversion is instantaneous.
"""
(
lgt,
dt,
dmhdt,
log_mah,
sm_target,
log_sm_target,
sfr_target,
fstar_target,
index_select,
fstar_indx_high,
fstar_tdelay,
ssfrh_floor,
weight,
weight_fstar,
t_fstar_max,
fixed_tau,
q_params,
) = loss_data
sfr_params = [*params[0:4], fixed_tau]
_res = calculate_sm_sfr_fstar_history_from_mah(
lgt,
dt,
dmhdt,
log_mah,
sfr_params,
q_params,
index_select,
fstar_indx_high,
fstar_tdelay,
)
mstar, sfr, fstar = _res
mstar = jnp.log10(mstar)
fstar = jnp.log10(fstar)
sfr_res = 1e8 * (sfr - sfr_target) / sm_target
sfr_res = jnp.clip(sfr_res, -1.0, 1.0)
loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2)
loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2)
loss += jnp.mean((sfr_res / weight) ** 2)
qt = _get_bounded_qt(q_params[0])
loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0)
return loss | c987b17b2a64081006addf8ed9af6a3535b77bdd | 3,655,731 |
import numpy as np
import matplotlib.pyplot as plt
def plot_timeseries_comp(date1, value1, date2, value2, fname_list,
labelx='Time [UTC]', labely='Value',
label1='Sensor 1', label2='Sensor 2',
titl='Time Series Comparison', period1=0, period2=0,
ymin=None, ymax=None, dpi=72):
"""
plots 2 time series in the same graph
Parameters
----------
date1 : datetime object
time of the first time series
value1 : float array
values of the first time series
date2 : datetime object
time of the second time series
value2 : float array
values of the second time series
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
label1, label2 : str
legend label for each time series
titl : str
The figure title
period1, period2 : float
measurement period in seconds used to compute accumulation. If 0 no
accumulation is computed
dpi : int
dots per inch
ymin, ymax : float
The limits of the Y-axis. None will keep the default limit.
Returns
-------
fname_list : list of str
list of names of the created plots
History
--------
201?.??.?? -fvj- created
2017.08.21 -jgr- changed some graphical aspects
"""
if (period1 > 0) and (period2 > 0):
# TODO: document this and check (sometimes artefacts)
value1 *= (period1/3600.)
value1 = np.ma.cumsum(value1)
value2 *= (period2/3600.)
value2 = np.ma.cumsum(value2)
fig, ax = plt.subplots(figsize=[10, 6.5], dpi=dpi)
ax.plot(date1, value1, 'b', label=label1, linestyle='--', marker='o')
ax.plot(date2, value2, 'r', label=label2, linestyle='--', marker='s')
ax.legend(loc='best')
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_title(titl)
ax.grid()
ax.set_ylim(bottom=ymin, top=ymax)
ax.set_xlim([date2[0], date2[-1]])
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list | a33be4ba8fbbaa2c8ee31694a4ede0d43deb171c | 3,655,732 |
def identityMatrix(nrow, ncol):
"""
Create a identity matrix of the given dimensions
Works for square Matrices
Returns a Matrix object
"""
if nrow == ncol:
t = []
for i in range(nrow):
t.append([])
for j in range(ncol):
if i == j:
t[i].append(1)
else:
t[i].append(0)
s = Matrix(nrow=nrow, ncol=ncol, data=t)
s.matrix.symmetry=True
s.matrix.trace=nrow
s.matrix.invertibility=True
setattr(s.matrix,"identityMatrix",True)
return s
else:
raise incompaitableTypeException | 3584c75cd0683f4dd547ac9708d03cdc5500dcef | 3,655,733 |
def extract_packages(matched, package_source):
"""
Extract packages installed in the "Successfully installed" line
e.g.
Successfully installed Abjad Jinja2-2.10 MarkupSafe-1.0 PyPDF2-1.26.0 Pygments-2.2.0 alabaster-0.7.10 \
babel-2.5.1 bleach-2.1.2 decorator-4.1.2 docutils-0.14 entrypoints-0.2.3 html5lib-1.0.1 imagesize-0.7.1 \
ipykernel-4.7.0 ipython-6.2.1 ipython-genutils-0.2.0 ipywidgets-7.1.0 jedi-0.11.1 jsonschema-2.6.0 \
jupyter-1.0.0 jupyter-client-5.2.1 jupyter-console-5.2.0 jupyter-core-4.4.0 mistune-0.8.3 nbconvert-5.3.1 \
nbformat-4.4.0 notebook-5.2.2 pandocfilters-1.4.2 parso-0.1.1 pexpect-4.3.1 pickleshare-0.7.4 \
prompt-toolkit-1.0.15 ....
"""
result = []
package_list = matched.groups()[0].split(' ')
for package in package_list:
package, version = split_package_and_version(package)
if not version or not package:
continue
else:
source = package_source.get(package)
if source is None:
continue
# The following line is recommended when developing
# assert source == PACKAGE_SOURCE_INDEX
result.append('{}=={}'.format(package, version))
return result | a4cf9c18122dd89b2c46647fa328ab72c4d7dd8a | 3,655,734 |
from typing import List
from typing import Optional
from datetime import datetime
def create_relationship(
relationship_type: str,
created_by: Identity,
source: _DomainObject,
target: _DomainObject,
confidence: int,
object_markings: List[MarkingDefinition],
start_time: Optional[datetime] = None,
stop_time: Optional[datetime] = None,
) -> Relationship:
"""Create a relationship."""
return Relationship(
created_by_ref=created_by,
relationship_type=relationship_type,
source_ref=source,
target_ref=target,
start_time=start_time,
stop_time=stop_time,
confidence=confidence,
object_marking_refs=object_markings,
allow_custom=True,
) | 4d961aae8521c53c61090823484e8b12862b29e0 | 3,655,735 |
from itertools import combinations
import networkx as nx
def _find_partition(G, starting_cell):
""" Find a partition of the vertices of G into cells of complete graphs
Parameters
----------
G : NetworkX Graph
starting_cell : tuple of vertices in G which form a cell
Returns
-------
List of tuples of vertices of G
Raises
------
NetworkXError
If a cell is not a complete subgraph then G is not a line graph
"""
G_partition = G.copy()
P = [starting_cell] # partition set
G_partition.remove_edges_from(list(combinations(starting_cell, 2)))
# keep list of partitioned nodes which might have an edge in G_partition
partitioned_vertices = list(starting_cell)
while G_partition.number_of_edges() > 0:
# there are still edges left and so more cells to be made
u = partitioned_vertices[-1]
deg_u = len(G_partition[u])
if deg_u == 0:
# if u has no edges left in G_partition then we have found
# all of its cells so we do not need to keep looking
partitioned_vertices.pop()
else:
# if u still has edges then we need to find its other cell
# this other cell must be a complete subgraph or else G is
# not a line graph
new_cell = [u] + list(G_partition[u])
for u in new_cell:
for v in new_cell:
if (u != v) and (v not in G_partition[u]):
msg = (
"G is not a line graph"
"(partition cell not a complete subgraph)"
)
raise nx.NetworkXError(msg)
P.append(tuple(new_cell))
G_partition.remove_edges_from(list(combinations(new_cell, 2)))
partitioned_vertices += new_cell
return P | 92c63176d6c2f366c549a24982dbc64c9879a9b7 | 3,655,736 |
import torch
def projection_from_Rt(rmat, tvec):
"""
Compute the projection matrix from Rotation and translation.
"""
assert len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3), rmat.shape
assert len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1), tvec.shape
return torch.cat([rmat, tvec], dim=-1) | 90039ba7002be31d347b7793d542b1ff37abae3e | 3,655,737 |
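# Usage sketch (not part of the original snippet): identity rotation and zero
# translation give the 3x4 projection matrix [I | 0].
rmat = torch.eye(3).unsqueeze(0)             # (1, 3, 3)
tvec = torch.zeros(1, 3, 1)                  # (1, 3, 1)
print(projection_from_Rt(rmat, tvec).shape)  # torch.Size([1, 3, 4])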
def verify_df(df, constraints_path, epsilon=None, type_checking=None,
**kwargs):
"""
Verify that (i.e. check whether) the Pandas DataFrame provided
satisfies the constraints in the JSON .tdda file provided.
Mandatory Inputs:
df A Pandas DataFrame, to be checked.
constraints_path The path to a JSON .tdda file (possibly
generated by the discover_constraints
function, below) containing constraints
to be checked.
Optional Inputs:
epsilon When checking minimum and maximum values
for numeric fields, this provides a
tolerance. The tolerance is a proportion
of the constraint value by which the
constraint can be exceeded without causing
a constraint violation to be issued.
With the default value of epsilon
(EPSILON_DEFAULT = 0.01, i.e. 1%), values
can be up to 1% larger than a max constraint
without generating constraint failure,
and minimum values can be up to 1% smaller
that the minimum constraint value without
generating a constraint failure. (These
are modified, as appropriate, for negative
values.)
NOTE: A consequence of the fact that these
are proportionate is that min/max values
of zero do not have any tolerance, i.e.
the wrong sign always generates a failure.
type_checking: 'strict' or 'sloppy'.
Because Pandas silently, routinely and
automatically "promotes" integer and boolean
columns to reals and objects respectively
if they contain nulls, strict type checking
can be problematical in Pandas. For this reason,
type_checking defaults to 'sloppy', meaning
that type changes that could plausibly be
attributed to Pandas type promotion will not
generate constraint values.
If this is set to strict, a Pandas "float"
column c will only be allowed to satisfy
an "int" type constraint if
c.dropnulls().astype(int) == c.dropnulls().
Similarly, Object fields will satisfy a
'bool' constraint only if
c.dropnulls().astype(bool) == c.dropnulls().
report: 'all' or 'fields'
This controls the behaviour of the __str__
method on the resulting PandasVerification
object (but not its content).
The default is 'all', which means that
all fields are shown, together with the
verification status of each constraint
for that field.
If report is set to 'fields', only fields for
which at least one constraint failed are shown.
NOTE: The method also accepts
'constraints', which will be used to
indicate that only failing constraints for
failing fields should be shown.
This behaviour is not yet implemented.
Returns:
PandasVerification object. This object has attributes:
passed # Number of passing constraints
failures # Number of failing constraints
It also has a .to_frame() method for converting the results
of the verification to a Pandas DataFrame, and a __str__
method to print both the detailed and summary results of
the verification.
Example usage (see tdda/constraints/examples/simple_verification.py
for slightly fuller example).
import pandas as pd
from tdda.constraints.pdconstraints import verify_df
df = pd.DataFrame({'a': [0, 1, 2, 10, pd.np.NaN],
'b': ['one', 'one', 'two', 'three', pd.np.NaN]})
v = verify_df(df, 'example_constraints.tdda')
print('Passes:', v.passes)
print('Failures: %d\n' % v.failures)
print(str(v))
print(v.to_frame())
"""
pdv = PandasConstraintVerifier(df, epsilon=epsilon,
type_checking=type_checking)
constraints = DatasetConstraints(loadpath=constraints_path)
return verify(constraints, pdv.verifiers(),
VerificationClass=PandasVerification, **kwargs) | 477180d390e3090ec7d8211b8cee7235d58d4eba | 3,655,738 |
import re
import pandas as pd
def _getallstages_pm(pmstr):
"""pmstr: a pipelinemodel name in quote
return a df: of all leaf stages of transformer.
to print return in a cell , use print_return(df)
"""
pm=eval(pmstr)
output=[]
for i,s in enumerate(pm.stages):
if str(type(s))=="<class 'pyspark.ml.pipeline.PipelineModel'>":
pmstr2=f"{pmstr}.stages[{i}]"
output.append(_getallstages_pm(pmstr2))
else:
tn=re.sub(r"^.*\.(\w+)\b.*",r"\1",str(type(s)))
pmstr2=f"{pmstr}.stages[{i}]"
temp=pd.DataFrame([[pmstr2,tn,None,None,None]],columns=['stage','transformer_name','inputcol','outputcol','other_parameters'])
if temp.transformer_name.iloc[0]=="SQLTransformer":
st='"statement=\n'+re.sub('\t',' ',eval(pmstr2).getStatement())+'"'
if len(st)>=32767:
idx1=st.rfind('\n',0,10000)
idx2=st.find('\n',len(st)-10000,len(st))
newst=st[:idx1]+"\n\n..........\n"+st[idx2:]
st=newst.replace("statement=","TRUNCATED !!!\n\nstatement=")
temp["other_parameters"]=st
elif temp.transformer_name.iloc[0]=="CountVectorizerModel":
temp["other_parameters"]="vocabulary="+str(eval(pmstr2).vocabulary)
elif temp.transformer_name.iloc[0]=="RFormulaModel":
temp["outputcol"]=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='featuresCol']
form="formular: "+[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='formula'][0]
temp["other_parameters"]=f"number of inputCol in formula: {form.count('+')+1}"
elif temp.transformer_name.iloc[0]=='LogisticRegressionModel':
label=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='labelCol'][0]
elasticNetParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='elasticNetParam'][0]
regParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='regParam'][0]
temp["other_parameters"]=f"labelCol : {label}, elasticNetParam : {elasticNetParam}, regParam : {regParam}"
else:
ip=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='inputCol']
if len(ip)>0:
temp["inputcol"]=ip
op=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='outputCol']
if len(op)>0:
temp["outputcol"]=op
output.append(temp)
outputdf=pd.concat(output)
outputdf=outputdf.reset_index(drop=True)
return outputdf | 8bb5643361aa5aa74c1ba477d725b575a2f15f0b | 3,655,739 |
def merge_config(log_conf: LogConf, conf: Config) -> Config:
"""
Create combined config object from system wide logger setting and current logger config
"""
#pylint: disable=too-many-locals
name = conf.name # take individual conf value, ignore common log_conf value
filename = _ITEM_OR_DEFAULT(log_conf.filename, conf.filename)
logger_level = _ITEM_OR_DEFAULT(log_conf.logger_level, conf.logger_level)
log_fmt = _ITEM_OR_DEFAULT(log_conf.log_fmt, conf.log_fmt)
log_datefmt = _ITEM_OR_DEFAULT(log_conf.log_datefmt, conf.log_datefmt)
log_level = _ITEM_OR_DEFAULT(log_conf.log_level, conf.log_level)
log_enabled = _ITEM_OR_DEFAULT(log_conf.log_enabled, conf.log_enabled)
cout_fmt = _ITEM_OR_DEFAULT(log_conf.cout_fmt, conf.cout_fmt)
cout_datefmt = _ITEM_OR_DEFAULT(log_conf.cout_datefmt, conf.cout_datefmt)
cout_level = _ITEM_OR_DEFAULT(log_conf.cout_level, conf.cout_level)
cout_enabled = _ITEM_OR_DEFAULT(log_conf.cout_enabled, conf.cout_enabled)
propagate = _ITEM_OR_DEFAULT(log_conf.propagate, conf.propagate)
log_dir = _ITEM_OR_DEFAULT(log_conf.log_dir, conf.log_dir)
sub_dir = _ITEM_OR_DEFAULT(log_conf.sub_dir, conf.sub_dir)
override_allowed = conf.override_allowed # take individual conf value, ignore common log_conf value
n_conf: Config = Config(name, filename, logger_level, log_fmt, log_datefmt, log_level, log_enabled, cout_fmt,
cout_datefmt, cout_level, cout_enabled, propagate, log_dir, sub_dir, override_allowed)
return n_conf | f62d7a48d83dd201323ff710c17b4ffbf39750bc | 3,655,740 |
def midpVector(x):
""" return midpoint value (=average) in each direction
"""
if type(x) != list:
raise Exception("must be list")
dim = len(x)
#nx = x[0].shape
for i in range(1,dim):
if type(x[i]) != np.ndarray:
raise Exception("must be numpy array")
#if x[i].shape != nx:
# raise Exception("dimensions mismatch")
avgx = []
for ifield in range(dim):
avgx.append([])
avgx[ifield] = midpScalar(x[ifield])
return avgx | 784dcfdeb012aa114167d4b965409ca2f81ed414 | 3,655,741 |
def buy_ticket(email, name, quantity):
"""
Attmempt to buy a ticket in the database
:param email: the email of the ticket buyer
:param name: the name of the ticket being bought
:param quantity: the quantity of tickets being bought
:return: an error message if there is any, or None if register succeeds
"""
user = User.query.filter_by(email=email).first()
tik = Ticket.query.filter_by(name=name).first()
user.balance = user.balance - (tik.price * quantity * 1.40)
if tik.quantity == quantity:
db.session.delete(tik)
else:
tik.quantity = tik.quantity - quantity
db.session.commit()
return None | cd64f745a44180594edce14eb0645f808ac645d8 | 3,655,742 |
from typing import Tuple
def update_bounds(
sig: float,
eps: float,
target_eps: float,
bounds: np.ndarray,
bound_eps: np.ndarray,
consecutive_updates: int
) -> Tuple[np.ndarray, np.ndarray, int]: # noqa:E121,E125
""" Updates bounds for sigma around a target privacy epsilon.
Updates the lower bound for sigma if `eps` is larger than `target_eps` and
the upper bound otherwise.
:param sig: A new value for sigma.
:param eps: The corresponding value for epsilon.
:param target_eps: The target value for epsilon.
:param bounds: Tuple containing a lower and upper bound for the sigma
corresponding to target_eps.
:param bound_eps: The corresponding epsilon values for the bounds.
:param consecutive_updates: Tuple counting the number of consecutive updates
for lower and upper bound.
:return: updated bounds, bound_eps and consecutive_updates
"""
assert(eps <= bound_eps[0])
assert(eps >= bound_eps[1])
if eps > target_eps:
bounds[0] = sig
bound_eps[0] = eps
consecutive_updates = [consecutive_updates[0] + 1, 0]
else:
bounds[1] = sig
bound_eps[1] = eps
consecutive_updates = [0, consecutive_updates[1] + 1]
return bounds, bound_eps, consecutive_updates | a3426220fe20a4857ac51048ab8d703decaf3e9f | 3,655,743 |
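# Usage sketch (not part of the original snippet): a hypothetical calibration
# loop that bisects sigma until the bracketing epsilons are close, assuming
# epsilon decreases monotonically in sigma. eval_epsilon is an assumed
# placeholder for whatever privacy accountant is used.
def calibrate_sigma(eval_epsilon, target_eps, bounds, bound_eps, tol=1e-3):
    consecutive_updates = [0, 0]
    while abs(bound_eps[0] - bound_eps[1]) > tol:
        sig = 0.5 * (bounds[0] + bounds[1])
        eps = eval_epsilon(sig)
        bounds, bound_eps, consecutive_updates = update_bounds(
            sig, eps, target_eps, bounds, bound_eps, consecutive_updates)
    return 0.5 * (bounds[0] + bounds[1])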
def get_timeseries(rics, fields='*', start_date=None, end_date=None,
interval='daily', count=None,
calendar=None, corax=None, normalize=False, raw_output=False, debug=False):
"""
Returns historical data on one or several RICs
Parameters
----------
rics: string or list of strings
Single RIC or List of RICs to retrieve historical data for
start_date: string or datetime.datetime or datetime.timedelta
Starting date and time of the historical range.
string format is: '%Y-%m-%dT%H:%M:%S'. e.g. '2016-01-20T15:04:05'.
datetime.timedelta is negative number of day relative to datetime.now().
Default: datetime.now() + timedelta(-100)
You can use the helper function get_date_from_today, please see the usage in the examples section
end_date: string or datetime.datetime or datetime.timedelta
End date and time of the historical range.
string format could be
- '%Y-%m-%d' (e.g. '2017-01-20')
- '%Y-%m-%dT%H:%M:%S' (e.g. '2017-01-20T15:04:05')
datetime.timedelta is negative number of day relative to datetime.now().
Default: datetime.now()
You can use the helper function get_date_from_today, please see the usage in the examples section
interval: string
Data interval.
Possible values: 'tick', 'minute', 'hour', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly' (Default 'daily')
Default: 'daily'
fields: string or list of strings
Use this parameter to filter the returned fields set.
Available fields: 'TIMESTAMP', 'VALUE', 'VOLUME', 'HIGH', 'LOW', 'OPEN', 'CLOSE', 'COUNT'
By default all fields are returned.
count: int, optional
Max number of data points retrieved.
calendar: string, optional
Possible values: 'native', 'tradingdays', 'calendardays'.
corax: string, optional
Possible values: 'adjusted', 'unadjusted'
normalize: boolean, optional
If set to True, the function will return a normalized data frame with the following columns 'Date','Security','Field'.
If the value of this parameter is False the returned data frame shape will depend on the number of rics and the number of fields in the response.
There are three different shapes:
- One ric and many fields
- Many rics and one field
- Many rics and many fields
Default: False
Remark: This parameter has a less precedence than the parameter rawOutput i.e. if rawOutput is set to True, the returned data will be the raw data and this parameter will be ignored
raw_output: boolean, optional
Set this parameter to True to get the data in json format
if set to False, the function will return a data frame which shape is defined by the parameter normalize
Default: False
debug: boolean, optional
When set to True, the json request and response are printed.
Default: False
Raises
------
Exception
If request fails or if server returns an error.
ValueError
If a parameter type or value is wrong.
Examples
--------
>>> import eikon as ek
>>> ek.set_app_key('set your app key here')
>>> req = ek.get_timeseries(["MSFT.O"], start_date = "2017-02-01T15:04:05",
>>> end_date = "2017-02-05T15:04:05", interval="tick")
>>> req = ek.get_timeseries(["MSFT.O"], start_date = "2017-03-01",
>>> end_date = "2017-03-10", interval="daily")
>>> req = ek.get_timeseries(["MSFT.O"], start_date = get_date_from_today(150),
>>> end_date = get_date_from_today(100), interval="daily")
"""
logger = eikon.Profile.get_profile().logger
# set the ric(s) in the payload
check_for_string_or_list_of_strings(rics, 'rics')
if is_string_type(rics):
rics = [rics.strip()]
if type(rics) == list:
rics = [ric.upper() if ric.islower() else ric for ric in rics]
# set the field(s) in the payload
if fields is None or fields == '*':
fields = ['*']
else:
check_for_string_or_list_of_strings(fields, 'fields')
if is_string_type(fields):
fields = fields.strip().upper().split()
else:
fields = [x.upper() for x in fields]
if '*' in fields:
fields = ['*']
elif 'TIMESTAMP' not in fields:
fields.append('TIMESTAMP')
# check the interval in the payload
check_for_string(interval, 'interval')
if start_date is None:
start_date = get_date_from_today(100)
if end_date is None:
end_date = get_date_from_today(0)
start_date = to_datetime(start_date).isoformat()
end_date = to_datetime(end_date).isoformat()
if start_date > end_date:
error_msg = 'end date ({}) should be after start date ({})'.format(end_date, start_date)
logger.error(error_msg)
raise ValueError(error_msg)
payload = {'rics': rics, 'fields': fields, 'interval': interval, 'startdate': start_date, 'enddate': end_date}
# Add optional parameters
# set the count in the payload
if count is not None:
check_for_int(count, 'count')
payload.update({'count': count})
# set the calendar in the payload
if calendar is not None:
if is_string_type(calendar):
payload.update({'calendar': calendar})
else:
error_msg = 'calendar must be a string'
logger.error(error_msg)
raise ValueError(error_msg)
# set the corax in the payload
if corax is not None:
if is_string_type(corax):
payload.update({'corax': corax})
else:
error_msg = 'corax must be a string'
logger.error(error_msg)
raise ValueError(error_msg)
ts_result = eikon.json_requests.send_json_request(TimeSeries_UDF_endpoint, payload, debug=debug)
# Catch all errors to raise a warning
ts_timeserie_data = ts_result['timeseriesData']
ts_status_errors = [ts_data for ts_data in ts_timeserie_data if get_json_value(ts_data, 'statusCode') == 'Error']
ts_error_messages = ''
for ts_status in ts_status_errors:
ts_error_message = get_json_value(ts_status, 'errorMessage')
ts_error_message = ts_error_message[ts_error_message.find("Description"):]
ts_instrument = get_json_value(ts_status, 'ric')
ts_error_message = ts_error_message.replace('Description', ts_instrument)
ts_error_messages += ts_error_message
ts_error_messages += ' | '
warning_message = 'Error with {}'.format(ts_error_message)
logger.warning(warning_message)
# if all timeseries are in error, then raise EikonError with all error messages
if len(ts_status_errors) == len(ts_timeserie_data):
logger.error(ts_error_messages)
raise EikonError(-1, message=ts_error_messages)
if raw_output: return ts_result
data_frame = None
if normalize:
data_frame = NormalizedDataFrame_Formatter(ts_result).get_data_frame()
else:
data_frame = NiceDataFrame_Formatter(ts_result).get_data_frame()
if len(data_frame) > 0:
data_frame = data_frame.fillna(pd.np.nan)
return data_frame | 52f8cb2f5fc422df0c9b474f879797b997ae3a4d | 3,655,744 |
def usd(value):
"""Format value as USD."""
return f"${value:,.2f}" | 022502cebaced49e21a311fe0bed6feead124ee9 | 3,655,745 |
import numpy as np
def random_mindist(N, mindist, width, height):
"""Create random 2D points with a minimal distance to each other.
Args:
N(int): number of points to generate
mindist(float): Minimal distance between each point
width(float): Specifies [0, width) for the x-coordinate
height(float): Specifies [0, height) for the y-coordinate
Returns:
np.array(shape=[N, 2]): matrix of coordinates
"""
Pts = np.empty(shape=[0, 2])
n = 0
while n < N:
X = random_uniform(1, width, height)
# rejection sampling
if closest_euclidean(X, Pts) > mindist:
Pts = np.vstack((Pts, X))
n = n+1
return Pts | 261627e47e72b95d90f9b9c409ce61535f2a4cf7 | 3,655,746 |
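# Usage sketch (not part of the original snippet); assumes the module's
# random_uniform and closest_euclidean helpers behave as the function above
# expects.
pts = random_mindist(50, 3.0, 100.0, 100.0)
print(pts.shape)  # (50, 2)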
def deactivate_spotting(ID):
"""
Function to deactivate a spotting document in Elasticsearch
Params:
ID::str
id of the document to deactivate
Returns:
bool
If the changes have been applied or not
"""
if not ID:
return False
try:
global INDEX
body = get_document(INDEX, ID)['_source']
body['is_active'] = False
create_or_update_document(INDEX, ID, body)
return True
except NotFoundError:
print("No documents found at deactivate_spotting")
return False
except Exception as e:
print("Exception @ deactivate_spotting\n{}".format(e))
return None | 381c79a08e990b64a0a1032b5b54b874b8c53926 | 3,655,747 |
import watools.General.raster_conversions as RC
import watools.Functions.Start as Start
import numpy as np
def Fraction_Based(nc_outname, Startdate, Enddate):
"""
This functions calculated monthly total supply based ETblue and fractions that are given in the get dictionary script
Parameters
----------
nc_outname : str
Path to the NetCDF containing the data
Startdate : str
Contains the start date of the model 'yyyy-mm-dd'
Enddate : str
Contains the end date of the model 'yyyy-mm-dd'
Returns
-------
DataCube_Tot_Sup : Array
Array containing the total supply [time,lat,lon]
DataCube_Non_Consumed : Array
Array containing the amount of non consumed water [time,lat,lon]
"""
# import water accounting plus modules
# import general modules
# Open Arrays
DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse")
DataCube_ETblue = RC.Open_nc_array(nc_outname, "Blue_Evapotranspiration", Startdate, Enddate)
# Get Classes
LU_Classes = Start.Get_Dictionaries.get_sheet5_classes()
LU_Classes_Keys = list(LU_Classes.keys())
# Get fractions
consumed_fractions_dict = Start.Get_Dictionaries.consumed_fractions()
# Create Array for consumed fractions
DataCube_Consumed_Fractions = np.ones(DataCube_LU.shape) * np.nan
# Create array with consumed_fractions
for Classes_LULC in LU_Classes_Keys:
Values_LULC = LU_Classes[Classes_LULC]
for Value_LULC in Values_LULC:
DataCube_Consumed_Fractions[DataCube_LU == Value_LULC] = consumed_fractions_dict[Classes_LULC]
# Calculated Total Supply
DataCube_Tot_Sup = DataCube_ETblue[:,:,:] / DataCube_Consumed_Fractions[None,:,:]
# Calculated Non consumed
DataCube_Non_Consumed = DataCube_Tot_Sup - DataCube_ETblue
return(DataCube_Tot_Sup, DataCube_Non_Consumed) | 378c149cc239eee31b10d90235b78cf15527b0e0 | 3,655,748 |
import numpy as np
def _qrd_solve(r, pmut, ddiag, bqt, sdiag):
"""Solve an equation given a QR factored matrix and a diagonal.
Parameters:
r - **input-output** n-by-n array. The full lower triangle contains
the full lower triangle of R. On output, the strict upper
triangle contains the transpose of the strict lower triangle of
S.
pmut - n-vector describing the permutation matrix P.
ddiag - n-vector containing the diagonal of the matrix D in the base
problem (see below).
bqt - n-vector containing the first n elements of B Q^T.
sdiag - output n-vector. It is filled with the diagonal of S. Should
be preallocated by the caller -- can result in somewhat greater
efficiency if the vector is reused from one call to the next.
Returns:
x - n-vector solving the equation.
Compute the n-vector x such that
A^T x = B, D x = 0
where A is an n-by-m matrix, B is an m-vector, and D is an n-by-n
diagonal matrix. We are given information about pivoted QR
factorization of A with permutation, such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is lower
triangular with nonincreasing diagonal elements. Q is m-by-m, R is
n-by-m, and P is n-by-n. If x = P z, then we need to solve
R z = B Q^T,
P^T D P z = 0 (why the P^T? and do these need to be updated for the transposition?)
If the system is rank-deficient, these equations are solved as well as
possible in a least-squares sense. For the purposes of the LM
algorithm we also compute the lower triangular n-by-n matrix S such
that
P^T (A^T A + D D) P = S^T S. (transpose?)
"""
n, m = r.shape
# "Copy r and bqt to preserve input and initialize s. In
# particular, save the diagonal elements of r in x." Recall that
# on input only the full lower triangle of R is meaningful, so we
# can mirror that into the upper triangle without issues.
for i in range(n):
r[i,i:] = r[i:,i]
x = r.diagonal().copy()
zwork = bqt.copy()
# "Eliminate the diagonal matrix d using a Givens rotation."
for i in range(n):
# "Prepare the row of D to be eliminated, locating the
# diagonal element using P from the QR factorization."
li = pmut[i]
if ddiag[li] == 0:
sdiag[i] = r[i,i]
r[i,i] = x[i]
continue
sdiag[i:] = 0
sdiag[i] = ddiag[li]
# "The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero."
bqtpi = 0.
for j in range(i, n):
# "Determine a Givens rotation which eliminates the
# appropriate element in the current row of D."
if sdiag[j] == 0:
continue
if abs(r[j,j]) < abs(sdiag[j]):
cot = r[j,j] / sdiag[j]
sin = 0.5 / np.sqrt(0.25 + 0.25 * cot**2)
cos = sin * cot
else:
tan = sdiag[j] / r[j,j]
cos = 0.5 / np.sqrt(0.25 + 0.25 * tan**2)
sin = cos * tan
# "Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0)."
r[j,j] = cos * r[j,j] + sin * sdiag[j]
temp = cos * zwork[j] + sin * bqtpi
bqtpi = -sin * zwork[j] + cos * bqtpi
zwork[j] = temp
# "Accumulate the transformation in the row of s."
if j + 1 < n:
temp = cos * r[j,j+1:] + sin * sdiag[j+1:]
sdiag[j+1:] = -sin * r[j,j+1:] + cos * sdiag[j+1:]
r[j,j+1:] = temp
# Save the diagonal of S and restore the diagonal of R
# from its saved location in x.
sdiag[i] = r[i,i]
r[i,i] = x[i]
# "Solve the triangular system for z. If the system is singular
# then obtain a least squares solution."
nsing = n
for i in range(n):
if sdiag[i] == 0.:
nsing = i
zwork[i:] = 0
break
if nsing > 0:
zwork[nsing-1] /= sdiag[nsing-1] # Degenerate case
# "Reverse loop"
for i in range(nsing - 2, -1, -1):
s = np.dot(zwork[i+1:nsing], r[i,i+1:nsing])
zwork[i] = (zwork[i] - s) / sdiag[i]
# "Permute the components of z back to components of x."
x[pmut] = zwork
return x | 3e9d75c135734770c248a39de5770c3b033262da | 3,655,749 |
import re
def find_version():
"""Extract the version number from the CLI source file."""
with open('pyweek.py') as f:
for l in f:
mo = re.match(r'__version__ = *(.*)?\s*', l)
if mo:
return eval(mo.group(1))
else:
raise Exception("No version information found.") | 128f2399a37b27412d2fdf6cf0901c1486709a09 | 3,655,750 |
import pandas.core.algorithms as algos
def remove_unused_levels(self):
"""
Create a new MultiIndex from the current one, removing unused levels,
meaning levels that are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex.from_product([range(2), list('ab')])
>>> i
MultiIndex(levels=[[0, 1], ['a', 'b']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
codes=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
codes=[[0, 0], [0, 1]])
"""
new_levels = []
new_labels = []
changed = False
for lev, lab in zip(self.levels, self.labels):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "lab" when all items are found:
uniques = algos.unique(lab)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# labels get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
label_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
label_mapping[uniques] = np.arange(len(uniques)) - has_na
lab = label_mapping[lab]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_labels.append(lab)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_labels(new_labels, validate=False)
return result | 8f07a2b943278d5d5ae7d78ab2c10e96acd349e4 | 3,655,751 |
def _transform_playlist(playlist):
"""Transform result into a format that more
closely matches our unified API.
"""
transformed_playlist = dict([
('source_type', 'spotify'),
('source_id', playlist['id']),
('name', playlist['name']),
('tracks', playlist['tracks']['total']),
])
return transformed_playlist | 62c19c132cbb9438c7a4b993e1d79111b79b86fd | 3,655,752 |
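A minimal usage sketch, assuming the function above is in scope; the playlist dict is a hypothetical, trimmed Spotify API response:
playlist = {"id": "37i9dQZF1DX8NTLI2TtZa6", "name": "Focus Flow", "tracks": {"total": 42}}
_transform_playlist(playlist)
# -> {'source_type': 'spotify', 'source_id': '37i9dQZF1DX8NTLI2TtZa6', 'name': 'Focus Flow', 'tracks': 42}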
from typing import Dict
from typing import Hashable
from typing import Any
import jax
import jax.numpy as jnp
def decode_map_states(beliefs: Dict[Hashable, Any]) -> Any:
"""Function to decode MAP states given the calculated beliefs.
Args:
beliefs: An array or a PyTree container containing beliefs for different variables.
Returns:
An array or a PyTree container containing the MAP states for different variables.
"""
return jax.tree_util.tree_map(lambda x: jnp.argmax(x, axis=-1), beliefs) | 3d8b9feecb3d612a4ff361f710ef1841cd016239 | 3,655,753 |
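A small sketch of decoding MAP states from a hypothetical beliefs dictionary, assuming the function above is in scope:
import jax.numpy as jnp
beliefs = {"x": jnp.array([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]])}
decode_map_states(beliefs)
# -> {'x': array of [1, 0]} (argmax over the last axis, applied leaf-wise)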
import matplotlib.pyplot as plt
import scarlet
def plot_stretch_Q(datas, stretches=[0.01,0.1,0.5,1], Qs=[1,10,5,100]):
"""
Plots different normalizations of your image using the stretch, Q parameters.
Parameters
----------
stretches : array
List of stretch params you want to permutate through to find optimal image normalization.
Default is [0.01, 0.1, 0.5, 1]
Qs : array
List of Q params you want to permutate through to find optimal image normalization.
Default is [1, 10, 5, 100]
Code adapted from:
https://pmelchior.github.io/scarlet/tutorials/display.html
Returns
-------
fig : Figure object
"""
fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9,9))
for i, stretch in enumerate(stretches):
for j, Q in enumerate(Qs):
asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
# Scale the RGB channels for the image
img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh)
ax[i][j].imshow(img_rgb)
ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q))
ax[i][j].axis('off')
return fig | d4dc4d52019aac10fc15dd96fd29c3abf6563446 | 3,655,754 |
def _isSpecialGenerateOption(target, optName):
"""
Returns ``True`` if the given option has a special generation function,
``False`` otherwise.
"""
return _getSpecialFunction(target, optName, '_generateSpecial') is not None | 387fcb96d0d13e45b38a645ee61f20441905a0f8 | 3,655,755 |
from django.utils import timezone
def count_active_days(enable_date, disable_date):
"""Return the number of days the segment has been active.
:param enable_date: The date the segment was enabled
:type enable_date: timezone.datetime
:param disable_date: The date the segment was disabled
:type disable_date: timezone.datetime
:returns: The amount of days a segment is/has been active
:rtype: int
"""
if enable_date is not None:
if disable_date is None or disable_date <= enable_date:
# There is no disable date, or it is not relevant.
delta = timezone.now() - enable_date
return delta.days
if disable_date > enable_date:
# There is a disable date and it is relevant.
delta = disable_date - enable_date
return delta.days
return 0 | 070a520c328dbe69491fc6eb991c816c9f4fccd8 | 3,655,756 |
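An illustrative call; plain datetimes are used so the sketch stays self-contained (this branch never touches timezone.now()):
from datetime import datetime, timedelta
enabled = datetime(2024, 1, 1)
disabled = enabled + timedelta(days=7)
count_active_days(enabled, disabled)  # -> 7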
def numpy_to_python_type(value):
"""
Convert to Python type from numpy with .item().
"""
try:
return value.item()
except AttributeError:
return value | f1d3a8ad77932342c182d7be76037fee3c869afe | 3,655,757 |
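A quick sketch of the conversion, assuming the function above is in scope:
import numpy as np
numpy_to_python_type(np.int64(3))       # -> 3 (plain Python int)
numpy_to_python_type(np.float32(2.5))   # -> 2.5 (plain Python float)
numpy_to_python_type("already native")  # -> returned unchanged (no .item() attribute)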
from tqdm import tqdm
import os
import numpy as np
import starry
from scipy.integrate import quad
def bigEI_numerical(Ey, t, P=1):
"""Return the column kp=0 of the matrix E_I, computed numerically."""
lmax = int(np.sqrt(Ey.shape[0]) - 1)
K = len(t)
map = starry.Map(ydeg=lmax, lazy=False)
theta = 360 / P * t
bigEI = np.zeros(K)
kp = 0
for k in tqdm(range(K), disable=bool(int(os.getenv("NOTQDM", "0")))):
def integrand(I):
map.inc = I * 180 / np.pi
A = map.design_matrix(theta=theta)
return (A @ Ey @ A.T * np.sin(I))[k, kp]
bigEI[k] = quad(integrand, 0, 0.5 * np.pi)[0]
return bigEI | f437a6b818b848fb8dc715ec04c693d6d3dbd161 | 3,655,758 |
import sys
import traceback
import pymysql
def db_select_all(db, query, data=None):
"""Select all rows"""
logger_instance.debug("query = <<%s>>"% (query[:100],))
cursor = db.cursor()
try:
cursor.execute(query, data)
except pymysql.MySQLError:
exc_type, exc_value, exc_traceback = sys.exc_info()
err_string = "Error from MySQL:\n" + query
sys.stderr.write(err_string + "\n")
logger_instance.debug(err_string)
#sys.stderr.write(repr(data) + "\n")
logger_instance.debug(repr(data))
traceback.print_exc()
logger_instance.debug(traceback.format_exc())
cursor.close()
sys.exit(1)
#return False
else:
result = cursor.fetchall()
cursor.close()
return result | 44184540092ab698292fff9285f4bff0cb163be7 | 3,655,759 |
def threshold_abs(image, threshold):
"""Return thresholded image from an absolute cutoff."""
return image > threshold | 5032f632371af37e81c3ebcc587475422d5ff2bf | 3,655,760 |
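A tiny example of the boolean mask it produces, assuming the function above is in scope:
import numpy as np
image = np.array([[0.1, 0.6],
                  [0.8, 0.2]])
threshold_abs(image, 0.5)
# -> array([[False,  True],
#           [ True, False]])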
import cv2
import numpy as np
def warp_images(img1_loc, img2_loc, h_loc):
"""
Warp img2_loc into the frame of img1_loc using the homography h_loc and
stitch the two images into a single output mosaic.
"""
rows1, cols1 = img1_loc.shape[:2]
rows2, cols2 = img2_loc.shape[:2]
print("0")
list_of_points_1 = np.array(
[[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]], np.float32).reshape(-1, 1, 2)
temp_points = np.array(
[[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]], np.float32).reshape(-1, 1, 2)
print("1")
list_of_points_2 = cv2.perspectiveTransform(temp_points, h_loc)
list_of_points = np.concatenate(
(list_of_points_1, list_of_points_2), axis=0)
print(list_of_points)
[x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
[x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)
print("3")
translation_dist = [-x_min, -y_min]
h_translation = np.array(
[[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])
print(((x_max - x_min, x_max, x_min), (y_max - y_min, y_max, y_min)))
output_img = cv2.warpPerspective(
img2_loc, h_translation.dot(h_loc), (x_max - x_min, y_max - y_min))
output_img[translation_dist[1]:rows1+translation_dist[1],
translation_dist[0]:cols1+translation_dist[0]] = img1_loc
print("5")
return output_img | ab5b364ded7647efb13c686a32acc8ee6c6487ba | 3,655,761 |
def ValidaCpf(msg='Cadastro de Pessoa Física (CPF): ', pont=True):
"""
-> Function to validate a CPF (Brazilian individual taxpayer number).
:param msg: Message shown to the user before reading the CPF.
:param pont: If True, returns a CPF with punctuation (e.g. xxx.xxx.xxx-xx).
If False, returns a CPF without punctuation (e.g. xxxxxxxxxxx)
:return: Returns a valid CPF.
"""
while True:
cpf = str(input(f'{msg}'))
if ('.' in cpf or '-' in cpf) and pont == False:
cpf = cpf.replace('.', '').replace('-', '')
contDig=0
for dig in cpf:
if dig.isnumeric():
contDig += 1 # Count the number of digits in the CPF
if contDig != 11: # If the CPF does not have exactly 11 digits, show an error message
print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
continue # Go back to the top of the loop
if '.' in cpf: # Check whether the CPF contains dots and whether the amount is correct (2)
if cpf.count('.') != 2:
print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
continue
else: # If there are no dots and pont=True, add the punctuation
if pont:
cpf = list(cpf)
cpf.insert(3, '.')
cpf.insert(7, '.')
if '-' in cpf: # Check whether the CPF contains a hyphen and whether the amount is correct (1)
if cpf.count('-') != 1:
print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
continue
else: # If there is no hyphen and pont=True, add the punctuation
if pont:
if not isinstance(cpf, list):
cpf = list(cpf)
cpf.insert(11, '-')
result = [''.join(cpf)] # Join the list
cpf = result[0]
break
return cpf | 3bdc298f7a2a3a4c16919a9caba21b71bbaf8539 | 3,655,762 |
from lxml import etree  # assumed: lxml, which provides the .xpath() method used below
def get_xml_path(xml, path=None, func=None):
"""
Return the content from the passed xml xpath, or return the result
of a passed function (receives xpathContext as its only arg)
"""
#doc = None
#ctx = None
result = None
#try:
doc = etree.fromstring(xml)
#ctx = doc.xpathNewContext()
if path:
#ret = ctx.xpathEval(path)
ret = doc.xpath(path)
if ret is not None:
if type(ret) == list:
if len(ret) >= 1:
result = ret[0].text
else:
result = ret
elif func:
result = func(doc)
else:
raise ValueError("'path' or 'func' is required.")
#finally:
# if doc:
# doc.freeDoc()
# if ctx:
# ctx.xpathFreeContext()
return result | 81bcce1806f11217a04fbc401226d727e0150735 | 3,655,763 |
def readXYdYData(filename, comment_character='#'):
"""
Read in a file containing 3 columns of x, y, dy
Lines beginning with commentCharacter are ignored
"""
return read_columnar_data(filename, number_columns=3, comment_character=comment_character) | 92fd9253e0b50688034e3d85d6f4589a589be066 | 3,655,764 |
def hexlen(x):
"""
Returns the string length of 'x' in hex format.
"""
return len(hex(x))+2 | 404ec4c3656bb35b87df6ae147db93922f2da059 | 3,655,765 |
def get_db():
""" connectionを取得します """
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db | ba3d474ba854d9dea8e8f0056ebbfd81fc86b91a | 3,655,766 |
def list_manipulation(lst, command, location, value=None):
"""Mutate lst to add/remove from beginning or end.
- lst: list of values
- command: command, either "remove" or "add"
- location: location to remove/add, either "beginning" or "end"
- value: when adding, value to add
remove: remove item at beginning or end, and return item removed
>>> lst = [1, 2, 3]
>>> list_manipulation(lst, 'remove', 'end')
3
>>> list_manipulation(lst, 'remove', 'beginning')
1
>>> lst
[2]
add: add item at beginning/end, and return list
>>> lst = [1, 2, 3]
>>> list_manipulation(lst, 'add', 'beginning', 20)
[20, 1, 2, 3]
>>> list_manipulation(lst, 'add', 'end', 30)
[20, 1, 2, 3, 30]
>>> lst
[20, 1, 2, 3, 30]
Invalid commands or locations should return None:
>>> list_manipulation(lst, 'foo', 'end') is None
True
>>> list_manipulation(lst, 'add', 'dunno') is None
True
"""
if command == "remove":
if location == "end":
return lst.pop()
elif location == "beginning":
return lst.pop(0)
elif command == "add":
if location == "beginning":
lst.insert(0,value)
return lst
elif location == "end":
lst.append(value)
return lst | c847257ea5508f60b84282c3ac8237b43cd3825a | 3,655,767 |
import six
def solve(
problem,
comm=_NoArgumentGiven,
dispatcher_rank=0,
log_filename=None,
results_filename=None,
**kwds
):
"""Solves a branch-and-bound problem and returns the
solution.
Note
----
This function also collects and summarizes runtime
workload statistics, which may introduce additional
overhead. This overhead can be avoided by directly
instantiating a :class:`Solver` object and
calling the :func:`Solver.solve` method.
Parameters
----------
problem : :class:`pybnb.Problem <pybnb.problem.Problem>`
An object that defines a branch-and-bound problem
comm : ``mpi4py.MPI.Comm``, optional
The MPI communicator to use. If unset, the
mpi4py.MPI.COMM_WORLD communicator will be
used. Setting this keyword to None will disable the
use of MPI and avoid an attempted import of
mpi4py.MPI (which avoids triggering a call to
`MPI_Init()`).
dispatcher_rank : int, optional
The process with this rank will be designated the
dispatcher process. If MPI functionality is disabled
(by setting comm=None, or when comm.size==1), this
keyword must be left at 0. (default: 0)
log_filename : string, optional
A filename where solver output should be sent in
addition to console. This keyword will be ignored if
the `log` keyword is set. (default: None)
results_filename : string, optional
Saves the solver results into a YAML-formatted file
with the given name. (default: None)
**kwds
Additional keywords to be passed to
:func:`Solver.solve`. See that method for additional
keyword documentation.
Returns
-------
results : :class:`SolverResults <pybnb.solver_results.SolverResults>`
An object storing information about the solve.
"""
opt = Solver(comm=comm, dispatcher_rank=dispatcher_rank)
if (opt.is_dispatcher) and ("log" not in kwds) and (log_filename is not None):
kwds["log"] = get_simple_logger(filename=log_filename)
results = opt.solve(problem, **kwds)
stats = opt.collect_worker_statistics()
if opt.is_dispatcher:
tmp = six.StringIO()
summarize_worker_statistics(stats, stream=tmp)
opt._disp.log_info(tmp.getvalue())
if opt.is_dispatcher and (results_filename is not None):
results.write(results_filename)
return results | 9c57c0748db0185fae1e731044e904d0f732b5de | 3,655,768 |
def solution(lst):
"""Given a non-empty list of integers, return the sum of all of the odd elements that are in even positions.
Examples
solution([5, 8, 7, 1]) ==> 12
solution([3, 3, 3, 3, 3]) ==> 9
solution([30, 13, 24, 321]) ==> 0
"""
#[SOLUTION]
return sum([x for idx, x in enumerate(lst) if idx%2==0 and x%2==1]) | f98482cad7061d725389442c9811e33539df4fdc | 3,655,769 |
def summarize_traffic_mix(l_d_flow_records, d_filters={}):
"""
Filter the traffic flow data and execute the processing analysis logic for network behavior metrics.
"""
o_tcp_src_analysis = TopProtocolAnalysis()
o_tcp_dst_analysis = TopProtocolAnalysis()
o_upd_src_analysis = TopProtocolAnalysis()
o_upd_dst_analysis = TopProtocolAnalysis()
for flow in l_d_flow_records:
# print "Flow:", str(flow)
if matches_desired_flows(op_src_asn_to_filter, op_dst_asn_to_filter, op_ingress_asn_to_filter, flow, d_filters):
# get srcIP and dstIP
int_flow_sa = flow['sa']
# get bytes and packets
flow_bytes = fputil.record_to_numeric(flow['ibyt'])
flow_packets = fputil.record_to_numeric(flow['ipkt'])
# get ports and protocol
flow_sp = fputil.record_to_numeric(flow['sp'])
flow_dp = fputil.record_to_numeric(flow['dp'])
str_flow_pr = fputil.proto_int_to_str(flow['pr'])
# process and save traffic information per selected L7 protocols and group other using -1 port number
if str_flow_pr == "TCP":
if flow_sp in cons.d_proto_l7_int_str.keys():
o_tcp_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets)
o_tcp_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa)
else:
o_tcp_src_analysis.update_port_sum(-1, flow_bytes, flow_packets)
o_tcp_src_analysis.update_port_ips_sum(-1, int_flow_sa)
if flow_dp in cons.d_proto_l7_int_str.keys():
o_tcp_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets)
else:
o_tcp_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets)
if str_flow_pr == "UDP":
if flow_sp in cons.d_proto_l7_int_str.keys():
o_upd_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets)
o_upd_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa)
else:
o_upd_src_analysis.update_port_sum(-1, flow_bytes, flow_packets)
o_upd_src_analysis.update_port_ips_sum(-1, int_flow_sa)
if flow_dp in cons.d_proto_l7_int_str.keys():
o_upd_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets)
else:
o_upd_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets)
return [o_tcp_src_analysis, o_tcp_dst_analysis,
o_upd_src_analysis, o_upd_dst_analysis] | 48b04cf0e1e4f8b50850a775994012af4a784728 | 3,655,770 |
from skimage import filters
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.restoration import denoise_wavelet
from skimage.segmentation import clear_border
from skimage.transform import rescale
def segment(X, upscale=1.0, denoise=False):
"""
:param X: 2-D grayscale image to segment
:param upscale: factor by which to rescale the image before thresholding (values > 1.0 upsample)
:param denoise: if True, apply wavelet denoising before thresholding
:return: labelled image of the thresholded, border-cleared regions
"""
if upscale > 1.0:
X = rescale(X, upscale)
if denoise:
X = denoise_wavelet(X)
thresh = filters.threshold_otsu(X)
bw = closing(X > thresh, square(3))
cleared = clear_border(bw)
cleared = rescale(cleared, 1.0 / upscale)
return label(cleared) | e81be87bdb27b7cf1cf1de434997a87ecea0cae4 | 3,655,771 |
def get_image_info(doc):
"""Create dictionary with key->id, values->image information
"""
id_img = dict()
#add image information
for img_infor in doc['images']:
filename = img_infor['file_name']
width = img_infor['width']
height = img_infor['height']
id_img[img_infor['id']] = [filename, width, height]
return id_img | b8c91e67572e5863f773db579ce26fa86530f32e | 3,655,772 |
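A small sketch with a hypothetical COCO-style annotation dict, assuming the function above is in scope:
doc = {"images": [{"id": 1, "file_name": "cat.jpg", "width": 640, "height": 480},
                  {"id": 2, "file_name": "dog.jpg", "width": 800, "height": 600}]}
get_image_info(doc)
# -> {1: ['cat.jpg', 640, 480], 2: ['dog.jpg', 800, 600]}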
def __check_partial(detected,approx, width, height):
"""
Check if it's a partial shape
It's a partial shape if the shape's contours is on the image's edges.
Parameters
----------
detected : Shape
The detected shape
approx : numpy.ndarray
Approximates a polygonal curves.
width : int
Image's width
height : int
Image's height
Returns
-------
detected : Shape
The detected shape
"""
# Checks in the x,y positions of the contours.
# The shape is on the image's edges if a point is less than 1 or more than width-1.
result = np.where((approx <= 1) | (approx >= width-1))
if(len(result[0]) > 0): #result[0] contain the positions found by np.where.
detected = Shape.Shape.PARTIAL.value
else:
#check if there is a point(X or Y) equals to height or height-1.
result = np.where((approx == height) | (approx == height-1))
result = np.where(result[2] == 1) #check if this point is Y.
if(len(result[0])>0):
detected = Shape.Shape.PARTIAL.value
else:
detected = None
return detected | 7808dd156de97fa467b7b471b77fa4abdeaede95 | 3,655,773 |
import re
def get_svg_size(filename):
"""return width and height of a svg"""
with open(filename) as f:
lines = f.read().split('\n')
width, height = None, None
for l in lines:
res = re.findall(r'<svg.*width="(\d+)pt".*height="(\d+)pt"', l)
if len(res) > 0:
# need to scale up, maybe due to omni-graffle
scale = 2
width = round(scale*float(res[0][0]))
height = round(scale*float(res[0][1]))
res = re.findall(r'width="([.\d]+)', l)
if len(res) > 0:
width = round(float(res[0]))
res = re.findall(r'height="([.\d]+)', l)
if len(res) > 0:
height = round(float(res[0]))
if width is not None and height is not None:
return width, height
assert False, 'cannot find height and width for ' + filename | 7732df636657950b050be409ef2439c975d6940d | 3,655,774 |
import time
import os
import glob
import pickle as pkl
import numpy as np
from pyteomics import mgf
from scipy.sparse import dok_matrix
def bin_mgf(mgf_files,output_file = None, min_bin = 50, max_bin = 850, bin_size = 0.01, max_parent_mass = 850, verbose = False, remove_zero_sum_rows = True, remove_zero_sum_cols = True, window_filter = True, filter_window_size = 50, filter_window_retain = 3, filter_parent_peak = True):
""" Bins an mgf file
Bins an mgf of ms2 spectra and returns a sparse CSR matrix. Operates on either a single or a list of mgf files.
The CSR matrix has bins on the rows and spectra as the columns
Args:
mgf_files: The path of an mgf file, or a list of multiple mgf files. Can be a directory path containing mgf files
output_file: Name of output file in pickle format.
min_bin: smallest m/z value to be binned.
max_bin: largest m/z value to be binned.
bin_size: m/z range in one bin.
max_parent_mass: Remove ions larger than this.
verbose: Print debug info.
remove_zero_sum_rows: Explicitly remove empty rows (bins).
remove_zero_sum_cols: Explicitly remove spectra where all values were filtered away (columns)
filter_parent_peak: Remove all ms2 peaks larger than the parent mass
Returns:
A sparse CSR matrix X, a list of bin names, and a list of spectra names
"""
start = time.time()
# Creates a list of bins based on the parameters inputted
bins = np.arange(min_bin, max_bin, bin_size)
# If the path passed in is a directory then loop through it
if type(mgf_files) != list and os.path.isdir(mgf_files):
dir = mgf_files
mgf_files = []
directory = os.fsencode(dir)
for file in os.listdir(directory):
filename = os.fsdecode(file)
# only save filenames of .mgf files in the directory
if filename.endswith(".mgf"):
mgf_files.append(os.path.join(dir, filename))
# If only one mgf file is passed in, make it a list so that it's iterable
elif type(mgf_files) != list:
mgf_files = glob.glob(mgf_files)
n_scans = 0
# Go through all the mgf files and see how many spectra are there in total
# for construction of the intensity matrix X
for file in mgf_files:
reader0 = mgf.MGF(file)
n_scans += len([x for x in reader0])
scan_names = []
# Create an empty sparse matrix with bins as the rows and spectra as the columns
X = dok_matrix((len(bins), n_scans), dtype=np.float32)
# Go through each file and bin each MGF file
for file in mgf_files:
X,scan_names = bin_sparse(X, file, scan_names, bins, max_parent_mass, verbose, window_filter, filter_window_size, filter_window_retain)
# Convert from DOK to CSR for easier processing/handling
X = X.tocsr()
X_orig_shape = X.shape
# Filter out rows summing to zero if specified
print("\nSummary:") if verbose else None
if remove_zero_sum_rows:
X, row_names_filter = filter_zero_rows(X)
# Adjust the bins accordingly based on row_names_filter which says which rows to keep
bins = [x for (x, v) in zip(bins, row_names_filter) if v]
print("Removed %s rows" % (X_orig_shape[0] - X.shape[0] )) if verbose else None
# Filter out columns summing to zero if specified
if remove_zero_sum_cols:
X, col_names_filter = filter_zero_cols(X)
# Adjust the scan names accordingly based on col_names_filter which says which columns to keep
scan_names = [x for (x, v) in zip(scan_names, col_names_filter) if v]
print("Removed %s cols" % (X_orig_shape[1] - X.shape[1] )) if verbose else None
if verbose:
print("Binned in %s seconds with dimensions %sx%s, %s nonzero entries (%s)\n" % (time.time()-start, X.shape[0], X.shape[1], X.count_nonzero(), X.count_nonzero()/(n_scans*len(bins))))
# If an output file is specified, write to it
if output_file is not None:
# Use pickle to create a binary file that holds the intensity matrix, bins, and spectra names
pkl.dump((X, bins, scan_names),open( output_file, "wb"))
print("Wrote data to " + output_file) if verbose else None
return(X, bins, scan_names) | f787a46415bbdc3bdbb52c62805618ad9edcec17 | 3,655,775 |
from flask import render_template
def index():
"""Video streaming home page which makes use of /mjpeg."""
return render_template('index.html') | 2fcc16af5bfc160a71f5eb74d1854b3c7e22587f | 3,655,776 |
def tex_quoted_no_underscore (s) :
"""Same as tex_quoted but does NOT quote underscores.
"""
if isinstance (s, pyk.string_types) :
s = _tex_pi_symbols.sub (_tex_subs_pi_symbols, s)
s = _tex_to_quote.sub (_tex_subs_to_quote, s)
s = _tex_tt_symbols.sub (_tex_subs_tt_symbols, s)
s = _tex_diacritics.sub (_tex_subs_diacritics, s)
return s | 96eca3b927e6c7cc84d721222ceb9e9405eb8763 | 3,655,777 |
import json
def load_from_json_file(filename):
"""
Function that creates an object from a JSON file.
"""
with open(filename, 'r') as f:
return json.loads(f.read()) | ed46cf62548cfb7e1eb3683b688d18246b34be23 | 3,655,778 |
def _variable_to_field(v):
"""Transform a FuzzyVariable into a restx field"""
if isinstance(v.domain, FloatDomain):
a, b = v.domain.min, v.domain.max
f = fields.Float(description=v.name, required=True, min=a, max=b, example=(a + b) / 2)
elif isinstance(v.domain, CategoricalDomain):
raise NotImplementedError
else:
raise ValueError("Unknown domain for variable %s" % v)
return v.name, f | c97b25ff0abecedc6f44210d2672422d9c3eefd2 | 3,655,779 |
def abs_ang_mom(u, lat=None, radius=RAD_EARTH, rot_rate=ROT_RATE_EARTH,
lat_str=LAT_STR):
"""Absolute angular momentum."""
if lat is None:
lat = u[lat_str]
coslat = cosdeg(lat)
return radius*coslat*(rot_rate*radius*coslat + u) | 57525fa5ed995208eced76b74e4263c695340575 | 3,655,780 |
import atexit
import logging
import os
import time
import urllib
from pyVim import connect
from pyVmomi import vim, vmodl
def main():
"""
Simple pyvmomi (vSphere SDK for Python) script that generates ESXi support bundles running from VCSA using vCenter Alarm
"""
# Logger for storing vCenter Alarm logs
vcAlarmLog = logging.getLogger('vcenter_alarms')
vcAlarmLog.setLevel(logging.INFO)
vcAlarmLogFile = os.path.join('/var/log', 'vcenter_alarms.log')
formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s","%Y-%m-%d %H:%M:%S")
vcAlarmLogHandler = logging.FileHandler(vcAlarmLogFile)
vcAlarmLogHandler.setFormatter(formatter)
vcAlarmLog.addHandler(vcAlarmLogHandler)
vcAlarmLog.propagate = False
args = get_args()
try:
si = None
try:
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
except IOError, e:
pass
if not si:
vcAlarmLog.info("Could not connect to the specified host using specified username and password")
print "Could not connect to the specified host using specified username and password"
return -1
atexit.register(connect.Disconnect, si)
content = si.RetrieveContent()
# Get Diag Manager which is used to generate support bundles in VC
diagManager = content.diagnosticManager
# Extract the vSphere Cluster generated from vCenter Server Alarm
cluster = os.environ['VMWARE_ALARM_EVENT_COMPUTERESOURCE']
#cluster = "Non-VSAN-Cluster"
if cluster == None:
vcAlarmLog.info("Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE")
print "Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE"
return -1
vcAlarmLog.info("Cluster passed from VC Alarm: " + cluster)
# Retrieve all vSphere Clusters
container = content.viewManager.CreateContainerView(content.rootFolder,
[vim.ClusterComputeResource],
True)
# Return vSphere Cluster that matches name specified
for c in container.view:
if c.name == cluster:
cluster_view = c
break
container.Destroy()
# Retrieve all ESXi hosts in the vSphere Cluster
# to generate log bundles for
hosts_to_generate_logs = []
hosts = cluster_view.host
for h in hosts:
hosts_to_generate_logs.append(h)
# Generate log bundle excluding VC logs
vcAlarmLog.info("Generating support bundle")
print "Generating support bundle"
task = diagManager.GenerateLogBundles_Task(includeDefault=False,host=hosts_to_generate_logs)
task_done = False
result = None
while not task_done:
if task.info.state == "success":
result = task.info.result
task_done = True
if task.info.state == "error":
vcAlarmLog.error("An error occured while generating support logs")
print "An error occured while generating support logs"
vcAlarmLog.error(task.info)
print task.info
return -1
task_done = True
if task.info.state == "running":
time.sleep(60)
# Path to which logs will be stored (automatically creating /esxi-support-logs dir)
dir = args.filepath + "/esxi-support-logs"
try:
os.stat(dir)
except:
vcAlarmLog.info("Creating directory " + dir + " to store support bundle")
os.mkdir(dir)
# Loop through the result to get the download URL for each
# ESXi support bundle and save it to VCSA filesystem
for file in result:
download_url = file.url
download_file = dir + "/vmsupport-" + file.system.name + ".tgz"
vcAlarmLog.info("Downloading " + download_url + " to " + download_file)
print "Downloading " + download_url + " to " + download_file
urllib.urlretrieve(download_url,download_file)
except vmodl.MethodFault, e:
vcAlarmLog.error("Caught vmodl fault : " + e.msg)
print "Caught vmodl fault : " + e.msg
return -1
except Exception, e:
vcAlarmLog.error("Caught exception : " + str(e))
print "Caught exception : " + str(e)
return -1
return 0 | 2c874bc06072896bb35f0288dd2ef4b5f69fe07f | 3,655,781 |
import collections
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts | 561dfe8c18810ce40ce4c0ff391d6838816de116 | 3,655,782 |
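A short illustration on a toy token list, assuming the function above is in scope:
_get_ngrams(["the", "cat", "sat"], max_order=2)
# -> Counter({('the',): 1, ('cat',): 1, ('sat',): 1,
#             ('the', 'cat'): 1, ('cat', 'sat'): 1})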
def print_donors_list():
"""
print a list of existing donors
"""
print(mr.list_donors())
return False | 997860c036cac95f73242174198092a1d7d3ea9b | 3,655,783 |
def log_cef(name, severity, env, *args, **kwargs):
"""Simply wraps the cef_log function so we don't need to pass in the config
dictionary every time. See bug 707060. env can be either a request
object or just the request.META dictionary"""
c = {'cef.product': getattr(settings, 'CEF_PRODUCT', 'AMO'),
'cef.vendor': getattr(settings, 'CEF_VENDOR', 'Mozilla'),
'cef.version': getattr(settings, 'CEF_VERSION', '0'),
'cef.device_version': getattr(settings, 'CEF_DEVICE_VERSION', '0'),
'cef.file': getattr(settings, 'CEF_FILE', 'syslog'), }
# The CEF library looks for some things in the env object like
# REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send
# half the stuff you'd expect, but it specifically doesn't implement
# readline on its FakePayload object so these things fail. I have no idea
# if that's outdated code in Django or not, but andym made this
# <strike>awesome</strike> less crappy so the tests will actually pass.
# In theory, the last part of this if() will never be hit except in the
# test runner. Good luck with that.
if isinstance(env, HttpRequest):
r = env.META.copy()
if 'PATH_INFO' in r:
r['PATH_INFO'] = env.build_absolute_uri(r['PATH_INFO'])
elif isinstance(env, dict):
r = env
else:
r = {}
if settings.USE_HEKA_FOR_CEF:
return settings.HEKA.cef(name, severity, r, *args, config=c, **kwargs)
else:
return _log_cef(name, severity, r, *args, config=c, **kwargs) | bb9631f32bf2a247760ff604e998e8058f203c9e | 3,655,784 |
import numpy as np
from numpy.random import choice  # assumed source of choice(); random.choice would behave similarly for the 1-D array used here
def collimate(S, r, phasevec, print_values = False):
"""Collimate r phase vectors into a new phase vector on [S].
Output: the collimated phase vector ([b(0),b(1),...,b(L'-1)], L') on [S].
Parameters:
S: output phase vectors has all multipliers on [S]
r: arity, the number of phase vectors that is collimated
phasevec: list of phasevectors to be collimated
To be improved:
-add scaled interval collimation with modulo measurement
"""
[b, L] = summate(r, phasevec) # calculate the values of b'(j^vec) in b
q = np.floor_divide(b,S) # calculate values of q = floor(b'(j^vec)/S)
q_meas = choice(q) # measured value is q_meas
# take values of b with q equals the measured value q_meas
b_new = np.ma.masked_where(q != q_meas, b).compressed()
L_new = len(b_new)
b_new = (b_new-b_new[0]) % S # modulo and substract first value to ignore global phase
# another equivalent option: b_new = b_new - S*q
if print_values:
#print("b =", b)
#print("q =", q)
#print("Measured value q =", q_meas)
print(phasevec[0][0], " and ", phasevec[1][0], " collimated into ", b_new)
return [b_new, L_new] | 1e1eb6c55cd1b51e7303613d581fda97ad14bdb0 | 3,655,785 |
import io
def parse_and_load(gw, subj, primitive, cgexpr, g):
""" Parse the conceptual grammar expression for the supplied subject and, if successful, add
it to graph g.
:param gw: parser gateway
:param subj: subject of expression
:param primitive: true means subClassOf, false means equivalentClass
:param cgexpr: expression to parse
:param g: graph to add the result to
:return: true means success, false error
"""
ttlresult = gw.parse(subj, primitive, cgexpr)
if ttlresult:
ttlresult = owlbasere.sub(r'\1>', ttlresult)
g.parse(io.StringIO(ttlresult), format='n3')
return bool(ttlresult) | cd5b1b27b5922fb6c0e377532192a6985a0a5783 | 3,655,786 |
def pushed(property_name, **kwargs) -> Signal:
"""
Returns the `pushed` Signal for the given property. This signal
is emitted, when a new child property is added to it.
From the perspective of a state, this can be achieved
with the `ContextWrapper.push(...)` function.<br>
__Hint:__ All key-word arguments of #constraint.s(...)
(`min_age`, `max_age`, `detached`) are supported.
"""
return s(f"{property_name}:pushed", **kwargs) | 999e6b20a92648d5042c075400af45c809f08a32 | 3,655,787 |
async def delete_data(table_name: str,
filter: str = Header(None),
filterparam: str = Header(None),
current_user_role: bool = Depends(security.get_write_permission)):
"""
Parameters
- **table_name** (path): **Required** - Name of the table to perform operations on.
- **filter** (header): Optional - SQL-like filter to limit the records to retrieve. ex: 'id=:qid and name=:qname'
- **filterparam** (header): Optional - SQL-like parameter of *filter. ex: {'qid':3,'qname':'jack'}
"""
log.logger.debug(
'Access \'/_table/{table_name}\' : run in delete_data(), input data table_name: [%s]' % table_name)
log.logger.debug('filter: [%s]' % filter)
log.logger.debug('filterparam: [%s]' % filterparam)
if not meta.DBMeta().check_table_schema(table_name):
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail='Table [ %s ] not found' % table_name
)
ptable = tablemodel.TableModel(table_name)
return ptable.delete(filter, filterparam) | ffcf6e1aba2089846f103820eda0960bbccd4bba | 3,655,788 |
import dask.dataframe as dd
def _is_dask_series(ddf):
"""
Will determine if the given arg is a dask Series.
Returns False if dask is not installed.
"""
try:
return isinstance(ddf, dd.Series)
except:
return False | 5166928c0bd54bfc69a3d7862fadc41c3a0b6d19 | 3,655,789 |
import numpy as np
import scipy.signal
def square(t, A=1, f=1, D=0):
"""
t: time
A: the amplitude, the peak deviation of the function from zero.
f: the ordinary frequency, the number of oscillations (cycles) that occur each second of time.
D: non-zero center amplitude
"""
square_ = A*scipy.signal.square(
2 * np.pi * f * t
) + D
return square_ | 8e1899891d5f0df6c171404c401e94f729233147 | 3,655,790 |
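A small sketch sampling one period, assuming the function above is in scope:
import numpy as np
t = np.linspace(0, 1, 8, endpoint=False)
square(t, A=2, f=1, D=1)
# first half of the period sits at D + A == 3, second half at D - A == -1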
def self_distance_array(reference, box=None, result=None, backend="serial"):
"""Calculate all possible distances within a configuration `reference`.
If the optional argument `box` is supplied, the minimum image convention is
applied when calculating distances. Either orthogonal or triclinic boxes are
supported.
If a 1D numpy array of dtype ``numpy.float64`` with the shape
``(n*(n-1)/2,)`` is provided in `result`, then this preallocated array is
filled. This can speed up calculations.
Parameters
----------
reference : numpy.ndarray
Reference coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is
arbitrary, will be converted to ``numpy.float32`` internally).
box : array_like, optional
The unitcell dimensions of the system, which can be orthogonal or
triclinic and must be provided in the same format as returned by
:attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:\n
``[lx, ly, lz, alpha, beta, gamma]``.
result : numpy.ndarray, optional
Preallocated result array which must have the shape ``(n*(n-1)/2,)`` and
dtype ``numpy.float64``. Avoids creating the array which saves time when
the function is called repeatedly.
backend : {'serial', 'OpenMP'}, optional
Keyword selecting the type of acceleration.
Returns
-------
d : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n*(n-1)/2,)``)
Array containing the distances ``dist[i,j]`` between reference
coordinates ``i`` and ``j`` at position ``d[k]``. Loop through ``d``:
.. code-block:: python
for i in range(n):
for j in range(i + 1, n):
k += 1
dist[i, j] = d[k]
.. versionchanged:: 0.13.0
Added *backend* keyword.
.. versionchanged:: 0.19.0
Internal dtype conversion of input coordinates to ``numpy.float32``.
"""
refnum = reference.shape[0]
distnum = refnum * (refnum - 1) // 2
distances = _check_result_array(result, (distnum,))
if len(distances) == 0:
return distances
if box is not None:
boxtype, box = check_box(box)
if boxtype == 'ortho':
_run("calc_self_distance_array_ortho",
args=(reference, box, distances),
backend=backend)
else:
_run("calc_self_distance_array_triclinic",
args=(reference, box, distances),
backend=backend)
else:
_run("calc_self_distance_array",
args=(reference, distances),
backend=backend)
return distances | 71ee400ad48f719316a0c3f3c101f432067e2387 | 3,655,791 |
import numpy
from scipy.sparse.linalg import LinearOperator, eigsh, ArpackNoConvergence
def mainRecursivePartitioningLoop(A, B, n_cutoff):
"""
Recursively partition the network with adjacency matrix A into modularity
communities, using the generalized modularity operator B(x, A, indx) and
stopping once a group is smaller than n_cutoff or no split improves modularity.
"""
# Initialize storage objects
n = A.shape[0]
groups = numpy.zeros((n,), dtype=int)
groups_history = []
counts = {'twoway-single' : 0,
'twoway-pair' : 0,
'threeway-pair' : 0}
to_split = {0 : True}
# Recursively partition network
while numpy.any([v for v in to_split.values()]):
for gn in [g for g,v in to_split.items() if v]:
# Initialize group info
indx = numpy.where(groups==gn)[0]
ni = len(indx)
#c = numpy.zeros((1,3))
if ni > n_cutoff:
# Calc and sort eigenvecs, eigenvalues
BtoEigs = LinearOperator((ni, ni),
matvec = lambda x: B(x, A, indx),
dtype=float)
try:
if ni > 2:
vals, vecs = eigsh(BtoEigs, k=3, which='BE')
sort_inds = numpy.argsort(-vals)
vals = vals[sort_inds]
vecs = vecs[:,sort_inds]
else:
vals, vecs = eigsh(BtoEigs, k=2, which='LA')
sort_inds = numpy.argsort(-vals)
vals = vals[sort_inds]
vecs = vecs[:,sort_inds]
vals = numpy.array([vals[0], vals[1], min(0, vals[1] - 1)])
except ArpackNoConvergence:
to_split[gn] = False
# Initialize temporary score and groups holders
temp_Q = {}
temp_C = {}
# Leading eignevec 2-way
temp_C['twoway-single'] = twoway1(vecs, B, A, indx)
temp_Q['twoway-single'] = modularity(temp_C['twoway-single'],
B, A, indx)
# Convert eigenvecs to vertex vectors
mod_factor = numpy.sqrt(vals[:2] - vals[2])
vecs = vecs[:,0:2] * mod_factor
# Leading two eigenvec 2-way
temp_C['twoway-pair'] = twoway2(vecs, B, A, indx)
temp_Q['twoway-pair'] = modularity(temp_C['twoway-pair'],
B, A, indx)
# # Leading two eigenvec 3-way
# temp_C['threeway-pair'] = threewayCoarse(vecs, B, A, indx, 24)
# temp_Q['threeway-pair'] = modularity(temp_C['threeway-pair'],
# B, A, indx)
#
# Determine best Score, Grouping
best_split_ind = [k for k in temp_Q.keys()]\
[numpy.where(list(temp_Q.values())==max(temp_Q.values()))[0][0]]
best_Q = temp_Q[best_split_ind]
best_C = temp_C[best_split_ind]
# Update master group store, info regarding availalbe splitting
if (best_Q > 0) and (max(best_C) - min(best_C) > 0):
counts[best_split_ind] += 1
g0 = numpy.array(best_C)==0
g1 = numpy.array(best_C)==1
g2 = numpy.array(best_C)==2
max_gn = max(groups)
groups[indx[g1]] = max_gn + 1
groups[indx[g2]] = max_gn + 2
to_split[gn] = sum(g0) > 2
to_split[max_gn + 1] = sum(g1) > 2
to_split[max_gn + 2] = sum(g2) > 2
groups_history.append(groups.copy())
else:
to_split[gn] = False
else:
to_split[gn] = False
groups_history = numpy.array(groups_history).T
return(groups, counts, groups_history) | e2983585825f068ce1bdcc26dfd91dd85be2e060 | 3,655,792 |
import numpy as np
import pandas as pd
def corrSmatFunc(df, metric='pearson-signed', simFunc=None, minN=None):
"""Compute a pairwise correlation matrix and return as a similarity matrix.
Parameters
----------
df : pd.DataFrame (n_instances, n_features)
metric : str
Method for correlation similarity: pearson or spearman, optionally "signed" (e.g. pearson-signed)
A "signed" similarity means that anti-correlated instances will have low similarity.
simFunc : function
Optionally supply an arbitrary distance function.
Function takes two instances and returns their distance.
minN : int
Minimum number of non-NA values in order for correlation to be non-NA.
Returns
-------
smatDf : pd.DataFrame (n_instances, n_instances)"""
if minN is None:
minN = df.shape[0]
if simFunc is None:
if metric in ['spearman', 'pearson']:
"""Anti-correlations are also considered as high similarity and will cluster together"""
smat = df.corr(method=metric, min_periods=minN).values**2
smat[np.isnan(smat)] = 0
elif metric in ['spearman-signed', 'pearson-signed']:
"""Anti-correlations are considered as dissimilar and will NOT cluster together"""
smat = df.corr(method=metric.replace('-signed', ''), min_periods=minN).values
smat = (smat**2 * np.sign(smat) + 1)/2
smat[np.isnan(smat)] = 0
else:
raise NameError('metric name not recognized')
else:
ncols = df.shape[1]
smat = np.zeros((ncols, ncols))
for i in range(ncols):
for j in range(ncols):
"""Assume distance is symetric"""
if i <= j:
tmpdf = df.iloc[:, [i, j]]
tmpdf = tmpdf.dropna()
if tmpdf.shape[0] >= minN:
d = simFunc(df.iloc[:, i], df.iloc[:, j])
else:
d = np.nan
smat[i, j] = d
smat[j, i] = d
return pd.DataFrame(smat, columns=df.columns, index=df.columns) | 3d8d3ad9c992f1f1518c8fc7699058e76f616c95 | 3,655,793 |
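A quick sketch on random data (column names are arbitrary), assuming the function above is in scope:
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.randn(50, 4), columns=list("abcd"))
smat = corrSmatFunc(df, metric="pearson-signed")
smat.shape                  # -> (4, 4) similarity matrix
float(smat.loc["a", "a"])   # -> 1.0 (perfect self-similarity)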
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
return ranks | 0dbdb923281f7dbf592cd7bd41615b235b0e0868 | 3,655,794 |
def _cmpopts(x, y):
"""Compare to option names.
The options can be of 2 forms: option_name or group/option_name. Options
without a group always comes first. Options are sorted alphabetically
inside a group.
"""
if '/' in x and '/' in y:
prex = x[:x.find('/')]
prey = y[:y.find('/')]
if prex != prey:
return cmp(prex, prey)
return cmp(x, y)
elif '/' in x:
return 1
elif '/' in y:
return -1
else:
return cmp(x, y) | 9da8f8f5666b2ea3f32eb092c6a3568947655400 | 3,655,795 |
def ask(question, choices):
"""Prompt user for a choice from a list. Return the choice."""
choices_lc = [x.lower() for x in choices]
user_choice = ""
match = False
while not match:
print question
user_choice = raw_input("[" + "/".join(choices) + "] ? ").strip().lower()
for choice in choices_lc:
if user_choice.startswith(choice):
match = True
break
return user_choice | 8a1f6019554dbb9e1ed6649b1a68040f99960fbe | 3,655,796 |
def get_and_validate_user(username, password):
"""
Check if a user with the given username/email exists and whether the
specified password matches the existing user's password.
If the user is valid, the user is returned; otherwise, the corresponding
exception is raised.
"""
user_model = apps.get_model("users", "User")
qs = user_model.objects.filter(Q(username=username) |
Q(email=username))
if len(qs) == 0:
raise WrongArguments("Username or password does not matches user.")
user = qs[0]
if not user.check_password(password):
raise WrongArguments("Username or password does not matches user.")
return user | 05b6675c12446e961d85b8c39b0437d51a7c40b8 | 3,655,797 |
import re
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
def process_tweet(tweet):
"""Process tweet function.
Input:
tweet: a string containing a tweet
Output:
tweets_clean: a list of words containing the processed tweet"""
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
# Remove stock market tickers like $GE
tweet = re.sub(r'\$\w*', '', tweet)
# Remove old style retweet text "RT"
tweet = re.sub(r'^RT[\s]+', '', tweet)
# Remove hyperlinks
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
# Remove hashtags
# Only removing the hash # sign from the word
tweet = re.sub(r'#', '', tweet)
# Tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
tweet_tokens = tokenizer.tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
# 1 Remove stopwords
# 2 Remove punctuation
if (word not in stopwords_english and word not in string.punctuation):
# 3 Stemming word
stem_word = stemmer.stem(word)
# 4 Add it to tweets_clean
tweets_clean.append(stem_word)
return tweets_clean | 2b69f70cfec5f90a6e58408fcd054cda7ad0f20a | 3,655,798 |
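An illustrative call, assuming the function above is in scope; it needs the NLTK stopwords corpus (e.g. nltk.download('stopwords')), and the exact stems shown are approximate:
process_tweet("RT @user: Loving the new #Python release! https://t.co/abc")
# -> roughly ['love', 'new', 'python', 'releas']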
def oracle_query_id(sender_id, nonce, oracle_id):
"""
Compute the query id for a sender and an oracle
:param sender_id: the account making the query
:param nonce: the nonce of the query transaction
:param oracle_id: the oracle id
"""
def _int32(val):
return val.to_bytes(32, byteorder='big')
return hash_encode("oq", decode(sender_id) + _int32(nonce) + decode(oracle_id)) | aa97834efd3df10951e05b99035dbef8210ba33d | 3,655,799 |