content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def compare_dirs_ignore_words(dir1, dir2, ignore_words, ignore_files=None):
"""Same as compare_dirs but ignores lines with words in ignore_words.
"""
return compare_dirs(
dir1,
dir2,
ignore=ignore_files,
function=lambda file1, file2:
compare_text_files_ignore_lines(file1, file2, ignore_words)
) | 2794027a638a7f775ae559b250a500e28a218e4a | 3,655,500 |
from decimal import Decimal
def float_to_wazn(value):
    """Converts a float value to an integer in the WAZN notation.
    The float format has a maximum of 6 decimal digits.
:param value: value to convert from float to WAZN notation
:returns: converted value in WAZN notation
"""
return int(Decimal(value) / MICRO_WAZN) | 6bf10dfbefe51b2a785c4d0504e446662487b485 | 3,655,501 |
import time
def timer(func):
""" Decorator to measure execution time """
def wrapper(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
elapsed = time.time() - start_time
        print('{:s}: {:.4f} sec'.format(func.__name__, elapsed))
return ret
return wrapper | 0f6a8a4dc8eff1aa49efaf5d26ac46e0cc483b3e | 3,655,502 |
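A minimal usage sketch for the decorator above, assuming it is in scope; the timed function and its argument are illustrative only.
@timer
def slow_sum(n):
    # Deliberately non-trivial work so the elapsed time is visible.
    return sum(i * i for i in range(n))
slow_sum(1_000_000)  # prints something like "slow_sum: 0.0712 sec"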
import uuid
def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
"""Adds a keyword plan campaign to the given keyword plan.
Args:
client: An initialized instance of GoogleAdsClient
customer_id: A str of the customer_id to use in requests.
        keyword_plan: A str of the keyword plan resource_name this keyword plan
            campaign should be attributed to.
Returns:
A str of the resource_name for the newly created keyword plan campaign.
Raises:
GoogleAdsException: If an error is returned from the API.
"""
keyword_plan_campaign_service = client.get_service(
"KeywordPlanCampaignService"
)
operation = client.get_type("KeywordPlanCampaignOperation")
keyword_plan_campaign = operation.create
keyword_plan_campaign.name = f"Keyword plan campaign {uuid.uuid4()}"
keyword_plan_campaign.cpc_bid_micros = 1000000
keyword_plan_campaign.keyword_plan = keyword_plan
network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH
keyword_plan_campaign.keyword_plan_network = network
geo_target = client.get_type("KeywordPlanGeoTarget")
# Constant for U.S. Other geo target constants can be referenced here:
# https://developers.google.com/google-ads/api/reference/data/geotargets
geo_target.geo_target_constant = "geoTargetConstants/2840"
keyword_plan_campaign.geo_targets.append(geo_target)
# Constant for English
language = "languageConstants/1000"
keyword_plan_campaign.language_constants.append(language)
response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Created keyword plan campaign with resource name: {resource_name}")
return resource_name | b6ce2ee2ec40e1192461c41941f18fe04f901344 | 3,655,503 |
from gensim.models import Word2Vec
def word2vec(sentences, year):
    """
    Creates a word2vec model.
    @param sentences: list of list of words in each sentence (title + abstract)
    @param year: first year of the decade covered by the model (used in the output filename)
    @return word2vec model
    """
print("Creating word2vec model")
model = Word2Vec(sentences, size=500, window=5, min_count=1, workers=4)
model.save(f"models/decades/word2vec_{str(year)}-{str(year+9)}.model")
print("Saved word2vec model")
return model | 745bd15f4c0cea5b9417fd0562625426bd5cd293 | 3,655,504 |
def true_rjust(string, width, fillchar=' '):
""" Justify the string to the right, using printable length as the width. """
return fillchar * (width - true_len(string)) + string | 53a8cbfd049c21821b64e1a218c9af2a7b4c8b7d | 3,655,505 |
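A sketch of how this might be used, assuming the function above is in scope; true_len is not shown in this entry, so a hypothetical version that ignores ANSI colour codes is assumed here.
import re
def true_len(s):
    # Hypothetical helper: measure length after stripping ANSI escape sequences.
    return len(re.sub(r"\x1b\[[0-9;]*m", "", s))
colored = "\x1b[31mred\x1b[0m"             # printable length 3, raw length 12
print(repr(true_rjust(colored, 10, ".")))  # seven dots followed by the coloured text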
import tensorflow as tf
def threshold_generator_with_values(values, duration, num_classes):
"""
Args:
values: A Tensor with shape (-1,)
Values = strictly positive, float thresholds.
duration: An int.
num_classes: An int.
Returns:
thresh: A Tensor with shape
(len(list_values), duration, num_classes, num_classes).
In each matrix,
diag = 0, and off-diag shares a single value > 0.
Matrices are sorted in ascending order of the values
w.r.t. axis=0.
"""
num_thresh = values.shape[0]
thresh = tf.reshape(values, [num_thresh, 1, 1, 1])
thresh = tf.tile(thresh, [1, duration, num_classes, num_classes])
# (num thresh, num cls, num cls)
mask = tf.linalg.tensor_diag([-1.] * num_classes) + 1
thresh *= mask
# Now diag = 0.
thresh += mask * 1e-11
    # Avoids 0 thresholds, which may occur
# when logits for different classes have the same value,
# e.g., 0, due to loss of significance.
# This operation may cause sparsity of SAT curve
# if llr_min is << 1e-11, but such a case is ignorable
# in practice, according to my own experience.
return thresh | 58aa50e08beaba8f299af3ec9dfeb3de652e6471 | 3,655,506 |
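A small usage sketch, assuming TensorFlow 2.x and the function above (with its import) in scope; the values are illustrative.
values = tf.constant([0.5, 1.0, 2.0])
thresh = threshold_generator_with_values(values, duration=4, num_classes=3)
print(thresh.shape)  # (3, 4, 3, 3): diagonals are ~0, off-diagonals share each value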
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
        Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True | 31e9a1faff21707b2fc44c7824bb05fc85967f00 | 3,655,507 |
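A quick check with NumPy, assuming the function above is in scope; the matrices are illustrative.
import numpy as np
H = np.array([[2.0, 1 + 1j], [1 - 1j, 3.0]])            # Hermitian by construction
print(is_hermitian(H))                                   # True
print(is_hermitian(H + np.array([[0, 1e-3], [0, 0]])))   # False: conjugate symmetry broken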
import heterocl as hcl
def argmax(a, b, axis=1, init_value=-1, name="argmax"):
    """ Find the index and value of the maximum along the given axis and write them into b """
assert axis<len(a.shape) and len(a.shape)<=2, "invalid axis"
assert b.shape[axis] == 2, "shape mismatch"
size = a.shape[axis] # save max arg index
def argmax2d(A, B):
init = hcl.compute((2,), lambda x: init_value)
r = hcl.reduce_axis(0, size, name="rdx")
# Y as reducer tensor
def sreduce(x, Y):
with hcl.if_(x > Y[1]):
Y[0] = r
Y[1] = x
my_argmax = hcl.reducer(init, sreduce)
if axis == 1:
return hcl.update(B,
lambda x, _y: my_argmax(A[x, r], axis=r), name=name)
else: # reduce in y axis
return hcl.update(B,
lambda _x, y: my_argmax(A[r, y], axis=r), name=name)
# return decorated function
mod = hcl.def_([a.shape, b.shape], name=name)(argmax2d)
mod(a, b) | 3626126cae255498cab854f8b898a7d0f730b20d | 3,655,508 |
def morphology(src, operation="open", kernel_shape=(3, 3), kernel_type="ones"):
"""Dynamic calls different morphological operations
("open", "close", "dilate" and "erode") with the given parameters
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
operation (str, optional) : name of a morphological operation:
``("open", "close", "dilate", "erode")``
Defaults to ``"open"``.
kernel_shape (tuple, optional) : shape of the kernel (rows, cols).
Defaults to (3,3).
kernel_type (str, optional) : type of kernel.
``("ones", "upper_triangle", "lower_triangle", "x", "plus", "ellipse")``
Defaults to ``"ones"``.
Returns:
        numpy.ndarray: a copy of the source image after applying the effect.
"""
kernel = create_2D_kernel(kernel_shape, kernel_type)
if operation == "open":
return open(src, kernel)
elif operation == "close":
return close(src, kernel)
elif operation == "dilate":
return dilate(src, kernel)
elif operation == "erode":
return erode(src, kernel)
else:
valid_operations = ["open", "close", "dilate", "erode"]
raise ValueError(
f"Invalid morphology operation '{operation}'. Valid morphological operations are {valid_operations}"
) | f8258616f07b9dd0089d323d9237483c05b86c2e | 3,655,509 |
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=None):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : See below. Default False.
Returns
-------
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
If detail is True, the DataFrame also contains a column N,
the estimated number of statistically independent measurements
that comprise the result at each lagtime.
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
See also
--------
imsd
emsd
"""
if traj['frame'].max() - traj['frame'].min() + 1 == len(traj):
# no gaps: use fourier-transform algorithm
return _msd_fft(traj, mpp, fps, max_lagtime, detail, pos_columns)
else:
# there are gaps in the trajectory: use slower algorithm
return _msd_gaps(traj, mpp, fps, max_lagtime, detail, pos_columns) | 4511eb3e0d69ab5581635ba93db6dedfd387eb84 | 3,655,510 |
import random
import pandas as pd
def build_rnd_graph(golden, rel, seed=None):
"""Build a random graph for testing."""
def add_word(word):
if word not in words:
words.add(word)
def add_edge(rel, word1, word2):
data.append((rel, word1, word2))
random.seed(seed)
m, _ = golden.shape
words = set()
for i in range(m):
if golden['relation'][i] != rel:
continue
add_word(golden['word1_id'][i])
add_word(golden['word2_id'][i])
data = []
for word1 in words:
for word2 in words:
if word1 >= word2:
continue
if random.randint(0, 1):
add_edge(rel, word1, word2)
add_edge(rel, word2, word1)
df = pd.DataFrame(data, columns=('relation', 'word1_id', 'word2_id'),
index=range(len(data)))
return df | 46eacb5a51cf94ee27ed33757887afca4cc153ff | 3,655,511 |
from typing import Union
import pathlib
from typing import IO
from typing import AnyStr
import inspect
import pandas
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
    sep: str
        The default separator to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
# ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
_pd_read_csv_signature = {
val.name for val in inspect.signature(pandas.read_csv).parameters.values()
}
_, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
if f_locals.get("sep", sep) is False:
f_locals["sep"] = "\t"
kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
return _read(**kwargs)
return parser_func | 318edb7e761e163c828878a1c186edc535d824a1 | 3,655,512 |
import glob
import os
import numpy as np
from astropy.io import fits
def load_and_join(LC_DIR):
"""
load and join quarters together.
Takes a list of fits file names for a given star.
Returns the concatenated arrays of time, flux and flux_err
"""
fnames = sorted(glob.glob(os.path.join(LC_DIR, "*fits")))
hdulist = fits.open(fnames[0])
t = hdulist[1].data
time = t["TIME"]
flux = t["PDCSAP_FLUX"]
flux_err = t["PDCSAP_FLUX_ERR"]
q = t["SAP_QUALITY"]
m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \
(q == 0)
x = time[m]
med = np.median(flux[m])
y = flux[m]/med - 1
yerr = flux_err[m]/med
for fname in fnames[1:]:
hdulist = fits.open(fname)
t = hdulist[1].data
time = t["TIME"]
flux = t["PDCSAP_FLUX"]
flux_err = t["PDCSAP_FLUX_ERR"]
q = t["SAP_QUALITY"]
m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \
(q == 0)
x = np.concatenate((x, time[m]))
med = np.median(flux[m])
y = np.concatenate((y, flux[m]/med - 1))
yerr = np.concatenate((yerr, flux_err[m]/med))
return x, y, yerr | 39345f4439d516b19707c68f29ab71987a54ec56 | 3,655,513 |
import cv2 as cv
from PIL import Image
from pydicom import dcmread
def dcm_to_pil_image_gray(file_path):
"""Read a DICOM file and return it as a gray scale PIL image"""
ds = dcmread(file_path)
# Get the image after apply clahe
img_filtered = Image.fromarray(apply_clahe(ds.pixel_array).astype("uint8"))
# Normalize original image to the interval [0, 255]
img = cv.normalize(ds.pixel_array, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
img = Image.fromarray(img.astype("uint8"))
return [img, img_filtered] | c85b149dd1d02f24e60411ffb6d0dccfb1afd949 | 3,655,514 |
from typing import Any
def get_object_unique_name(obj: Any) -> str:
"""Return a unique string associated with the given object.
That string is constructed as follows: <object class name>_<object_hex_id>
"""
return f"{type(obj).__name__}_{hex(id(obj))}" | f817abf636673f7ef6704cbe0ff5a7a2b897a3f6 | 3,655,515 |
def create_voting_dict(voting_data):
"""
Input: a list of strings. Each string represents the voting record of a senator.
The string consists of
- the senator's last name,
- a letter indicating the senator's party,
- a couple of letters indicating the senator's home state, and
- a sequence of numbers (0's, 1's, and negative 1's) indicating the senator's
votes on bills
all separated by spaces.
Output: A dictionary that maps the last name of a senator
to a list of numbers representing the senator's voting record.
Example:
>>> vd = create_voting_dict(['Kennedy D MA -1 -1 1 1', 'Snowe R ME 1 1 1 1'])
>>> vd == {'Snowe': [1, 1, 1, 1], 'Kennedy': [-1, -1, 1, 1]}
True
You can use the .split() method to split each string in the
strlist into a list; the first element of the list will be the senator's
name, the second will be his/her party affiliation (R or D), the
third will be his/her home state, and the remaining elements of
the list will be that senator's voting record on a collection of bills.
You can use the built-in procedure int() to convert a string
representation of an integer (e.g. '1') to the actual integer
(e.g. 1).
The lists for each senator should preserve the order listed in voting data.
In case you're feeling clever, this can be done in one line.
"""
voting_dic = {}
for s in voting_data:
s = s.strip()
items = s.split(' ')
voting_dic[items[0]] = [int(v) for v in items[3:]]
return voting_dic | 4a662c110b88ae92ea548da6caa15b01b82f1cf2 | 3,655,516 |
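The docstring example, run against the function above (assuming it is in scope):
vd = create_voting_dict(['Kennedy D MA -1 -1 1 1', 'Snowe R ME 1 1 1 1'])
print(vd)  # {'Kennedy': [-1, -1, 1, 1], 'Snowe': [1, 1, 1, 1]}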
def areFriends(profile1, profile2):
"""Checks wether profile1 is connected to profile2 and profile2 is connected to profile1"""
def check(p1, p2):
if p1.isServiceIdentity:
fsic = get_friend_serviceidentity_connection(p2.user, p1.user)
return fsic is not None and not fsic.deleted
else:
friend_map = get_friends_map(p1.user)
return friend_map is not None and remove_slash_default(p2.user) in friend_map.friends
return check(profile1, profile2) and check(profile2, profile1) | 89eb04d0b8cce054e75d26d194d3f88f9b5970db | 3,655,517 |
def filter_dict(regex_dict, request_keys):
"""
filter regular expression dictionary by request_keys
:param regex_dict: a dictionary of regular expressions that
follows the following format:
{
"name": "sigma_aldrich",
"regexes": {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
"product_name": {
"regex": "\\s[P|p]roduct\\s(?P\u003cdata\u003e.{80})",
"flags": "is"
},
...
}
returns
{
'sigma_aldrich': {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
}
:param request_keys: a list of dictionary keys that correspond to valid
regex lookups i.e. ['manufacturer', 'product_name']
"""
out_dict = dict()
nested_regexes = regex_dict['regexes']
for request_key in request_keys:
if request_key in nested_regexes:
out_dict[request_key] = nested_regexes[request_key]
return {'name': regex_dict['name'], 'regexes': out_dict} | fb503f0d4df0a7965c276907b7a9e43bd14f9cac | 3,655,518 |
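A usage sketch, assuming the function above is in scope; the regex dictionary and its keys are illustrative only.
regex_dict = {
    "name": "sigma_aldrich",
    "regexes": {
        "manufacturer": {"regex": "[C|c]ompany(?P<data>.{80})", "flags": "is"},
        "product_name": {"regex": "\\s[P|p]roduct\\s(?P<data>.{80})", "flags": "is"},
    },
}
print(filter_dict(regex_dict, ["manufacturer", "unknown_key"]))
# {'name': 'sigma_aldrich', 'regexes': {'manufacturer': {'regex': ..., 'flags': 'is'}}}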
import six
def calculate_partition_movement(prev_assignment, curr_assignment):
"""Calculate the partition movements from initial to current assignment.
Algorithm:
For each partition in initial assignment
# If replica set different in current assignment:
# Get Difference in sets
    :rtype: tuple
        (dict mapping partition -> (from_broker_set, to_broker_set), total_movements)
"""
total_movements = 0
movements = {}
for prev_partition, prev_replicas in six.iteritems(prev_assignment):
curr_replicas = curr_assignment[prev_partition]
diff = len(set(curr_replicas) - set(prev_replicas))
if diff:
total_movements += diff
movements[prev_partition] = (
(set(prev_replicas) - set(curr_replicas)),
(set(curr_replicas) - set(prev_replicas)),
)
return movements, total_movements | 180a47944523f0c814748d1918935e47d9a7ada4 | 3,655,519 |
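A minimal example, assuming six is installed and the function above is in scope; the partition names and replica lists are illustrative.
prev = {("topic", 0): [1, 2, 3], ("topic", 1): [2, 3, 4]}
curr = {("topic", 0): [1, 2, 5], ("topic", 1): [2, 3, 4]}
movements, total = calculate_partition_movement(prev, curr)
print(total)      # 1
print(movements)  # {('topic', 0): ({3}, {5})}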
from typing import List
from typing import Union
import numpy as np
import torch
from typing import Sequence
def correct_crop_centers(
centers: List[Union[int, torch.Tensor]],
spatial_size: Union[Sequence[int], int],
label_spatial_shape: Sequence[int],
) -> List[int]:
"""
Utility to correct the crop center if the crop size is bigger than the image size.
Args:
        centers: pre-computed crop centers, will correct based on the valid region.
spatial_size: spatial size of the ROIs to be sampled.
label_spatial_shape: spatial shape of the original label data to compare with ROI.
"""
spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)
if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():
raise ValueError("The size of the proposed random crop ROI is larger than the image size.")
# Select subregion to assure valid roi
valid_start = np.floor_divide(spatial_size, 2)
# add 1 for random
valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)
# int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range
# from being too high
for i, valid_s in enumerate(valid_start):
# need this because np.random.randint does not work with same start and end
if valid_s == valid_end[i]:
valid_end[i] += 1
for i, c in enumerate(centers):
center_i = c
if c < valid_start[i]:
center_i = valid_start[i]
if c >= valid_end[i]:
center_i = valid_end[i] - 1
centers[i] = center_i
corrected_centers: List[int] = [c.item() if isinstance(c, torch.Tensor) else c for c in centers] # type: ignore
return corrected_centers | d8a28d464a4d0fcd8c1ad8ed0b790713aaa878e2 | 3,655,520 |
import numpy as np
def contrast_normalize(data, centered=False):
"""Normalizes image data to have variance of 1
Parameters
----------
data : array-like
data to be normalized
centered : boolean
When False (the default), centers the data first
Returns
-------
data : array-like
normalized data
"""
if not centered:
data = center(data)
data = np.divide(data, np.sqrt(np.var(data)))
return data | e85e5488e0c69bcf0233c03f3595a336b3ad7921 | 3,655,521 |
def create_gdrive_folders(website_short_id: str) -> bool:
"""Create gdrive folder for website if it doesn't already exist"""
folder_created = False
service = get_drive_service()
base_query = "mimeType = 'application/vnd.google-apps.folder' and not trashed and "
query = f"{base_query}name = '{website_short_id}'"
fields = "nextPageToken, files(id, name, parents)"
folders = list(query_files(query=query, fields=fields))
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
filtered_folders = []
for folder in folders:
ancestors = get_parent_tree(folder["parents"])
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID in [
ancestor["id"] for ancestor in ancestors
]:
filtered_folders.append(folder)
else:
filtered_folders = folders
if len(filtered_folders) == 0:
folder_metadata = {
"name": website_short_id,
"mimeType": DRIVE_MIMETYPE_FOLDER,
}
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
folder_metadata["parents"] = [settings.DRIVE_UPLOADS_PARENT_FOLDER_ID]
else:
folder_metadata["parents"] = [settings.DRIVE_SHARED_ID]
folder = (
service.files()
.create(supportsAllDrives=True, body=folder_metadata, fields="id")
.execute()
)
folder_created = True
else:
folder = filtered_folders[0]
Website.objects.filter(short_id=website_short_id).update(gdrive_folder=folder["id"])
for subfolder in [
DRIVE_FOLDER_FILES,
DRIVE_FOLDER_FILES_FINAL,
DRIVE_FOLDER_VIDEOS_FINAL,
]:
query = f"{base_query}name = '{subfolder}' and parents = '{folder['id']}'"
folders = list(query_files(query=query, fields=fields))
if len(folders) == 0:
folder_metadata = {
"name": subfolder,
"mimeType": DRIVE_MIMETYPE_FOLDER,
"parents": [folder["id"]],
}
service.files().create(
supportsAllDrives=True, body=folder_metadata, fields="id"
).execute()
folder_created = True
return folder_created | 21928843c47bbc3b175b65a7268eb63e0bec1275 | 3,655,522 |
def filter_for_recognized_pumas(df):
"""Written for income restricted indicator but can be used for many other
indicators that have rows by puma but include some non-PUMA rows. Sometimes
we set nrows in read csv/excel but this approach is more flexible"""
return df[df["puma"].isin(get_all_NYC_PUMAs())] | fe3c608495603f74300dc79a4e50d185d87ca799 | 3,655,523 |
import pandas as pd
import os
def hotspots2006(path):
"""Hawaian island chain hotspot Argon-Argon ages
Ar-Ar Ages (millions of years) and distances (km) from Kilauea along the
trend of the chain of Hawaian volcanic islands and other seamounts that
are believed to have been created by a moving "hot spot".
A data frame with 10 observations on the following 6 variables.
`age`
Ar-Ar age
`CI95lim`
Measurement error; 95% CI
`geoErr`
Geological Uncertainty
`totplus`
Total uncertainty (+)
`totminus`
Total uncertainty (-)
`distance`
Distance in kilometers
Warren D. Sharp and David A. Clague, 50-Ma initiation of
Hawaiian-Emperor bend records major change in Pacific Plate motion.
Science 313: 1281-1284 (2006).
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `hotspots2006.csv`.
Returns:
Tuple of np.ndarray `x_train` with 10 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'hotspots2006.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/hotspots2006.csv'
maybe_download_and_extract(path, url,
save_file_name='hotspots2006.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | fce88e988e62227c8247fd58575db1fafd35e73f | 3,655,524 |
def school_booking_cancel(request, pk_booking):
"""Render the school booking cancel page for a school representative.
:param request: httprequest received
:type request: HttpRequest
:param pk_booking: Primary Key of a Booking
:type pk_booking: int
:return: Return a HttpResponse whose content is filled with the result of the passed arguments.
:rtype: HttpResponse
"""
booking = Booking.objects.get(id=pk_booking)
if request.method == "POST":
booking.status = "Cancelled"
booking.reason_cancellation = request.POST["reason_cancellation"]
booking.save()
admin_email = ADMIN_EMAIL
send_email_booking_cancellation(admin_email, booking)
return redirect("school-dashboard")
data = {"booking": booking}
return render(request, "schoolApp/school-booking-cancel.html", data) | 1b0e09119ba453efdf486e11a57d92c508a497ff | 3,655,525 |
def bandpass_filter(df, spiky_var):
"""Detect outliers according to a passband filter specific to each variable.
Parameters
----------
df: pandas DataFrame that contains the spiky variable
    spiky_var: string that designates the spiky variable
Returns
-------
id_outlier: index of outliers"""
if spiky_var == 'LE':
id_bandpass = ( df[spiky_var] < -35 ) | ( df[spiky_var] > 300 ) # in [W+1m-2]
elif spiky_var == 'H':
id_bandpass = ( df[spiky_var] < -100 ) | ( df[spiky_var] > 400 ) # in [W+1m-2]
elif spiky_var == 'CO2_flux':
id_bandpass = ( df[spiky_var] < -10 ) | ( df[spiky_var] > 20 ) # in [µmol+1s-1m-2]
elif spiky_var == 'CH4_flux':
id_bandpass = ( df[spiky_var] < -0.1 ) | ( df[spiky_var] > 0.25 ) # in [µmol+1s-1m-2]
return id_bandpass | a20e3861f04212fe8b3e44d278da9ed58d545d1c | 3,655,526 |
import pandas as pd
def load_energy():
    """Loads the energy file, skipping all useless information, and returns it as a dataframe"""
energy = pd.read_excel("Energy Indicators.xls", skiprows=17, header=0,
skip_footer=53-15, na_values="...", usecols=[2,3,4,5])
# Rename columns
energy.columns = ["Country", "Energy Supply [Petajoules]", "Energy Supply per Capita [Gigajoules]", "% Renewable"]
# Exclude numbers from country names
energy["Country"] = energy["Country"].str.replace("\d+", "")
# Delete the parentheses
energy["Country"] = energy["Country"].str.replace("\(.*\)", "")
return energy | 10c9e638398d74eed57ccab414cac5577623c6cf | 3,655,527 |
import re
def list_list_to_string(list_lists,data_delimiter=None,row_formatter_string=None,line_begin=None,line_end=None):
"""Repeatedly calls list to string on each element of a list and string adds the result
. ie coverts a list of lists to a string. If line end is None the value defaults to "\n", for no seperator use ''
"""
if line_end is None:
line_end="\n"
check_arg_type(list_lists,ListType)
string_out=""
for index,row in enumerate(list_lists):
if index==len(list_lists)-1:
            if line_end == "\n":
last_end=""
else:
last_end=re.sub("\n","",line_end,count=1)
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=last_end)
else:
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=line_end)
return string_out | d1e69d21205fcc21e186a8dd160c1817fb1f0f68 | 3,655,528 |
import os
def get_env_loader(package, context):
"""This function returns a function object which extends a base environment
based on a set of environments to load."""
def load_env(base_env):
# Copy the base environment to extend
job_env = dict(base_env)
# Get the paths to the env loaders
env_loader_paths = get_env_loaders(package, context)
# If DESTDIR is set, set _CATKIN_SETUP_DIR as well
if context.destdir is not None:
job_env['_CATKIN_SETUP_DIR'] = context.package_dest_path(package)
for env_loader_path in env_loader_paths:
# print(' - Loading resultspace env from: {}'.format(env_loader_path))
resultspace_env = get_resultspace_environment(
os.path.split(env_loader_path)[0],
base_env=job_env,
quiet=True,
cached=context.use_env_cache,
strict=False)
job_env.update(resultspace_env)
return job_env
return load_env | acc7e5bf5b23c885dfeed2af71e054205c5b1aa9 | 3,655,529 |
import numpy as np
def sampen(L, m):
    """
    Compute the sample entropy of the time series L with embedding dimension m,
    using a tolerance of 0.2 times the standard deviation of L.
    """
N = len(L)
r = (np.std(L) * .2)
B = 0.0
A = 0.0
# Split time series and save all templates of length m
xmi = np.array([L[i: i + m] for i in range(N - m)])
xmj = np.array([L[i: i + m] for i in range(N - m + 1)])
# Save all matches minus the self-match, compute B
B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])
# Similar for computing A
m += 1
xm = np.array([L[i: i + m] for i in range(N - m + 1)])
A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])
# Return SampEn
return -np.log(A / B) | 0a97e8dd8c4edbf2ec4cbbdff3241af7de3f2a66 | 3,655,530 |
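A usage sketch, assuming NumPy and the function above are in scope; the signals are illustrative.
rng = np.random.default_rng(0)
regular = np.sin(np.linspace(0, 20 * np.pi, 500))  # highly regular signal
noisy = rng.standard_normal(500)                   # irregular signal
print(sampen(regular, 2))  # low sample entropy
print(sampen(noisy, 2))    # noticeably higher sample entropy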
from typing import Union
def total(score: Union[int, RevisedResult]) -> int:
"""
Return the total number of successes (negative for a botch).
If `score` is an integer (from a 1st/2nd ed. die from :func:`standard` or
:func:`special`) then it is returned unmodified.
If `score` is a :class:`RevisedResult` (from :func:`revised_standard` or
:func:`revised_special`) then the value returned is the net successes,
except in the special case where there were successes but they were all
cancelled out by botches. In that case return 0 even if the net successes
is negative.
"""
return int(score) | 849b757875ea461b0ea6bf4a63270e6f5fbac28c | 3,655,531 |
import torch
import os
def load_vgg16(model_dir, gpu_ids):
""" Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
# if not os.path.exists(model_dir):
# os.mkdir(model_dir)
# if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
# if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
# os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
# vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
# vgg = Vgg16()
# for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
# dst.data[:] = src
# torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
vgg = Vgg16()
# vgg.cuda()
vgg.cuda(device=gpu_ids[0])
vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
vgg = torch.nn.DataParallel(vgg, gpu_ids)
return vgg | 7e6499c5ce8f81f693015916eeb8649263dc3039 | 3,655,532 |
def rbbox_overlaps_v3(bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate overlap between two set of bboxes.
Args:
bboxes1 (torch.Tensor): shape (B, m, 5) in <cx, cy, w, h, a> format
or empty.
bboxes2 (torch.Tensor): shape (B, n, 5) in <cx, cy, w, h, a> format
or empty.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert mode in ['iou', 'iof']
# Either the boxes are empty or the length of boxes's last dimension is 5
assert (bboxes1.size(-1) == 5 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 5 or bboxes2.size(0) == 0)
rows = bboxes1.size(0)
cols = bboxes2.size(0)
if is_aligned:
assert rows == cols
if rows * cols == 0:
return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
return obb_overlaps(bboxes1, bboxes2, mode, is_aligned) | cdd414b02ac08c5a8bc494ed7319942e26bc5f02 | 3,655,533 |
import warnings
import tvm
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the arch in three different places, first in the target attributes, then the global
scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
# 1. Target
if target:
if "arch" in target.attrs:
compute_version = target.attrs["arch"]
major, minor = compute_version.split("_")[1]
return major + "." + minor
# 2. Global scope
from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel
if AutotvmGlobalScope.current.cuda_target_arch:
major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
return major + "." + minor
# 3. GPU
if tvm.gpu(0).exist:
return tvm.gpu(0).compute_version
warnings.warn(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return None | ad55cbb81fb74521175ea6fbdcba54cc14a409cb | 3,655,534 |
def get_poet_intro_by_id(uid):
"""
get poet intro by id
:param uid:
:return:
"""
return Poet.get_poet_by_id(uid) | d6fd84bd150ee8ce72becdb6b27b67d1c21c7a9b | 3,655,535 |
import datetime
def create_post():
"""Создать пост"""
user = get_user_from_request()
post = Post(
created_date=datetime.datetime.now(),
updated_date=datetime.datetime.now(),
creator=user,
)
json = request.get_json()
url = json["url"]
if Post.get_or_none(Post.url == url) is not None:
return errors.post_url_already_taken()
error = set_blog(post, json, user)
if error is not None:
error_response = {
BlogError.NoBlog: errors.blog_not_found(),
BlogError.NoAccess: errors.blog_no_access(),
}[error]
return error_response
fill_post_from_json(post, json)
post.save()
set_tags_for_post(post, json)
manage_jam_entries(post, json)
return jsonify({"success": 1, "post": post.to_json()}) | 7b8eaf74cda78198d08ca6eac66f1f13a12c4341 | 3,655,536 |
import async_timeout
async def fetch(session, url):
"""Method to fetch data from a url asynchronously
"""
async with async_timeout.timeout(30):
async with session.get(url) as response:
return await response.json() | d8ff22df047fece338dcfe4c6286766a563ff9aa | 3,655,537 |
def recurse_while(predicate, f, *args):
"""
Accumulate value by executing recursively function `f`.
The function `f` is executed with starting arguments. While the
predicate for the result is true, the result is fed into function `f`.
If predicate is never true then starting arguments are returned.
:param predicate: Predicate function guarding execution.
:param f: Function to execute.
:param *args: Starting arguments.
"""
result = f(*args)
result = result if type(result) == tuple else (result, )
while predicate(*result):
args = result # predicate(args) is always true
result = f(*args)
result = result if type(result) == tuple else (result, )
return args if len(args) > 1 else args[0] | fd3313760c246336519a2e89281cc94a2bee6833 | 3,655,538 |
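A small example, assuming the function above is in scope: keep doubling until the value exceeds 100.
result = recurse_while(lambda x: x <= 100, lambda x: x * 2, 3)
print(result)  # 96 -- the last value for which the predicate still held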
from typing import Optional
import ast
import sys
def unparse(node: Optional[ast.AST]) -> Optional[str]:
"""Unparse an AST to string."""
if node is None:
return None
elif isinstance(node, str):
return node
elif node.__class__ in OPERATORS:
return OPERATORS[node.__class__]
elif isinstance(node, ast.arg):
if node.annotation:
return "%s: %s" % (node.arg, unparse(node.annotation))
else:
return node.arg
elif isinstance(node, ast.arguments):
return unparse_arguments(node)
elif isinstance(node, ast.Attribute):
return "%s.%s" % (unparse(node.value), node.attr)
elif isinstance(node, ast.BinOp):
return " ".join(unparse(e) for e in [node.left, node.op, node.right])
elif isinstance(node, ast.BoolOp):
op = " %s " % unparse(node.op)
return op.join(unparse(e) for e in node.values)
elif isinstance(node, ast.Bytes):
return repr(node.s)
elif isinstance(node, ast.Call):
args = ([unparse(e) for e in node.args] +
["%s=%s" % (k.arg, unparse(k.value)) for k in node.keywords])
return "%s(%s)" % (unparse(node.func), ", ".join(args))
elif isinstance(node, ast.Dict):
keys = (unparse(k) for k in node.keys)
values = (unparse(v) for v in node.values)
items = (k + ": " + v for k, v in zip(keys, values))
return "{" + ", ".join(items) + "}"
elif isinstance(node, ast.Ellipsis):
return "..."
elif isinstance(node, ast.Index):
return unparse(node.value)
elif isinstance(node, ast.Lambda):
return "lambda %s: ..." % unparse(node.args)
elif isinstance(node, ast.List):
return "[" + ", ".join(unparse(e) for e in node.elts) + "]"
elif isinstance(node, ast.Name):
return node.id
elif isinstance(node, ast.NameConstant):
return repr(node.value)
elif isinstance(node, ast.Num):
return repr(node.n)
elif isinstance(node, ast.Set):
return "{" + ", ".join(unparse(e) for e in node.elts) + "}"
elif isinstance(node, ast.Str):
return repr(node.s)
elif isinstance(node, ast.Subscript):
return "%s[%s]" % (unparse(node.value), unparse(node.slice))
elif isinstance(node, ast.UnaryOp):
return "%s %s" % (unparse(node.op), unparse(node.operand))
elif isinstance(node, ast.Tuple):
if node.elts:
return ", ".join(unparse(e) for e in node.elts)
else:
return "()"
elif sys.version_info > (3, 6) and isinstance(node, ast.Constant):
# this branch should be placed at last
return repr(node.value)
else:
raise NotImplementedError('Unable to parse %s object' % type(node).__name__) | b0fafc4c423af192cb4c15b1e063f7b1c8dfa568 | 3,655,539 |
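A usage sketch on Python <= 3.11 (where the deprecated ast.Num/ast.Str aliases still exist), assuming the function above is in scope; OPERATORS is defined elsewhere in the original module, so an empty placeholder mapping is assumed here.
OPERATORS = {}  # placeholder assumption; the real module maps operator node classes to symbols
node = ast.parse("a.b(1, x=2)", mode="eval").body
print(unparse(node))  # a.b(1, x=2)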
import timeit
import logging
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
"""Lookup variables"""
index_bounds = None
sorted_train_pos_items = None
def index_segment(user):
lower, upper = index_bounds[user:user + 2]
items = sorted_train_pos_items[lower:upper]
negatives_since_last_positive = np.concatenate(
[items[0][np.newaxis], items[1:] - items[:-1] - 1])
return np.cumsum(negatives_since_last_positive)
start_time = timeit.default_timer()
inner_bounds = np.argwhere(train_pos_users[1:] -
train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = train_pos_users.shape
index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
# Later logic will assume that the users are in sequential ascending order.
assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))
sorted_train_pos_items = train_pos_items.copy()
for i in range(num_users):
lower, upper = index_bounds[i:i + 2]
sorted_train_pos_items[lower:upper].sort()
total_negatives = np.concatenate([
index_segment(i) for i in range(num_users)])
logging.info("Negative total vector built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
return total_negatives, index_bounds, sorted_train_pos_items | c3e1087cce5a38d681379f30d7c6dee8d1544e60 | 3,655,540 |
def total_allocation_constraint(weight, allocation: float, upper_bound: bool = True):
"""
Used for inequality constraint for the total allocation.
:param weight: np.array
:param allocation: float
    :param upper_bound: bool, if true the constraint is from above (sum of weights <= allocation), else from below
        (sum of weights >= allocation)
:return: np.array
"""
if upper_bound:
return allocation - weight.sum()
else:
return weight.sum() - allocation | b92c4bd18d1c6246ff202987c957a5098fd66ba1 | 3,655,541 |
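A quick check with NumPy, assuming the function above is in scope; the weights are illustrative.
import numpy as np
w = np.array([0.3, 0.4, 0.2])
print(total_allocation_constraint(w, allocation=1.0))                     # ~0.1  -> satisfies the upper bound
print(total_allocation_constraint(w, allocation=1.0, upper_bound=False))  # ~-0.1 -> violates the lower bound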
import numpy as np
def sigmoid(x):
    """ computes sigmoid of x """
return 1.0/(1.0 + np.exp(-x)) | cd34b4ed9fe08607ea3fec1dce89bee7c34efeb0 | 3,655,542 |
def handle_error(err):
"""Catches errors with processing client requests and returns message"""
code = 500
error = 'Error processing the request'
if isinstance(err, HTTPError):
code = err.code
error = str(err.message)
return jsonify(error=error, code=code), code | d6f9051bab504852720f657d04bc6afa72794047 | 3,655,543 |
from pyunitwizard.kernel import default_form, default_parser
from pyunitwizard import convert as _convert, get_dimensionality as _get_dimensionality
from typing import Dict
def dimensionality(quantity_or_unit: str) -> Dict[str, int]:
""" Returns the dimensionality of the quantity or unit.
Parameters
-----------
quantity_or_unit : str
A quanitity or a unit
Returns
-------
dimensionality_dict : dict
Dictionary which keys are fundamental units and values are the exponent of
each unit in the quantity.
"""
tmp_quantity_or_unit = _convert(quantity_or_unit, to_form=default_form, parser=default_parser)
return _get_dimensionality(tmp_quantity_or_unit) | 4c334c6283a57704036c414cfd52f79b875a93fc | 3,655,544 |
import re
def split_prec_rows(df):
"""Split precincts into two rows.
NOTE: Because this creates a copy of the row values, don't rely on total vote counts, just look at percentage.
"""
for idx in df.index:
# look for rows with precincts that need to be split
if re.search('\d{4}/\d{4}',idx):
row_values = df.loc[idx]
split = idx.split('/')
for p in split:
df.loc[p] = row_values
# delete original row
df = df.drop(idx, axis=0)
return(df) | 72ba424080b0ff3e04ecc5d248bc85b4f409167c | 3,655,545 |
def socfaker_elasticecsfields_host():
"""
Returns an ECS host dictionary
Returns:
dict: Returns a dictionary of ECS
host fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.host)) | 41a2624eda28eae736398ece87aee2ee2028987c | 3,655,546 |
from textwrap import dedent
def _moog_writer(photosphere, filename, **kwargs):
"""
Writes an :class:`photospheres.photosphere` to file in a MOOG-friendly
format.
:param photosphere:
The photosphere.
    :param filename:
The filename to write the photosphere to.
"""
def _get_xi():
xi = photosphere.meta["stellar_parameters"].get("microturbulence", 0.0)
if 0 >= xi:
logger.warn("Invalid microturbulence value: {:.3f} km/s".format(xi))
return xi
if photosphere.meta["kind"] == "marcs":
xi = _get_xi()
output = dedent("""
WEBMARCS
MARCS (2011) TEFF/LOGG/[M/H]/XI {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f}
NTAU {0:.0f}
5000.0
""".format(len(photosphere),
photosphere.meta["stellar_parameters"]["effective_temperature"],
photosphere.meta["stellar_parameters"]["surface_gravity"],
photosphere.meta["stellar_parameters"]["metallicity"],
xi)).lstrip()
for i, line in enumerate(photosphere):
output += " {0:>3.0f} {0:>3.0f} {1:10.3e} {0:>3.0f} {2:10.3e} "\
"{3:10.3e} {4:10.3e}\n".format(i + 1, line["lgTau5"], line["T"],
line["Pe"], line["Pg"])
output += " {0:.3f}\n".format(xi)
output += "NATOMS 0 {0:.3f}\n".format(
photosphere.meta["stellar_parameters"]["metallicity"])
output += "NMOL 0\n"
elif photosphere.meta["kind"] == "castelli/kurucz":
xi = _get_xi()
output = dedent("""
KURUCZ
CASTELLI/KURUCZ (2004) {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f}/{5:.3f}
NTAU {0:.0f}
""".format(len(photosphere),
photosphere.meta["stellar_parameters"]["effective_temperature"],
photosphere.meta["stellar_parameters"]["surface_gravity"],
photosphere.meta["stellar_parameters"]["metallicity"],
photosphere.meta["stellar_parameters"]["alpha_enhancement"],
xi)).lstrip()
for line in photosphere:
output += " {0:.8e} {1:10.3e}{2:10.3e}{3:10.3e}{4:10.3e}\n".format(
line["RHOX"], line["T"], line["P"], line["XNE"], line["ABROSS"])
output += " {0:.3f}\n".format(xi)
output += "NATOMS 0 {0:.3f}\n".format(
photosphere.meta["stellar_parameters"]["metallicity"])
output += "NMOL 0\n"
# MOOG11 fails to read if you don't add an extra line
output += "\n"
else:
raise ValueError("photosphere kind '{}' cannot be written to a MOOG-"\
"compatible format".format(photosphere.meta["kind"]))
with open(filename, "w") as fp:
fp.write(output)
return None | da5a952e15984aecd914ebcf9381900924fdeff1 | 3,655,547 |
def upcomingSplits(
symbol="",
exactDate="",
token="",
version="stable",
filter="",
format="json",
):
"""This will return all upcoming estimates, dividends, splits for a given symbol or the market. If market is passed for the symbol, IPOs will also be included.
https://iexcloud.io/docs/api/#upcoming-events
Args:
symbol (str): Symbol to look up
exactDate (str): exactDate Optional. Exact date for which to get data
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
return _baseEvent(
"splits",
symbol=symbol,
exactDate=exactDate,
token=token,
version=version,
filter=filter,
format=format,
) | ae21f8c04bf7bf2eacd8b16aa62c9fae7750e042 | 3,655,548 |
def mu_model(u, X, U, k):
"""
Returns the utility of the kth player
    Parameters
    ----------
    u : current strategy (vector) of the kth player
    X : data matrix
    U : matrix whose columns hold the players' strategies; only the first k are used
    k : index of the player
    Returns
    -------
    The utility of the kth player (rewards minus accumulated penalties).
"""
M = X.T @ X
rewards = M @ u
penalties = u.T @ M @ U[:, :k] * U[:, :k]
return rewards - penalties.sum(axis=1) | 59bce1ce8617f0e11340d1c1ab18315fd81e6925 | 3,655,549 |
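A shape check with random data, assuming NumPy and the function above are in scope; the sizes are illustrative.
import numpy as np
rng = np.random.default_rng(0)
X = rng.standard_normal((50, 5))  # 50 samples, 5 features
U = rng.standard_normal((5, 3))   # players' strategies as columns
u = rng.standard_normal(5)
print(mu_model(u, X, U, k=2).shape)  # (5,)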
def tokenizer_init(model_name):
"""simple wrapper for auto tokenizer"""
tokenizer = AutoTokenizer.from_pretrained(model_name)
return tokenizer | c54accf802fcbcf1479ccb0745266a5182edb73b | 3,655,550 |
def insert_message(nick, message, textDirection):
"""
Insert record
"""
ins = STATE['messages_table'].insert().values(
nick=nick, message=message, textDirection=textDirection)
res = STATE['conn'].execute(ins)
ltr = 1 if textDirection == 'ltr' else 0
rtl = 1 if textDirection == 'rtl' else 0
STATE['conn'].execute(
'update message_stats set ltr = ltr + ?, rtl = rtl + ?',
ltr, rtl)
return {
'id': res.lastrowid
} | 1163fab2342aa5e41b321055bbf75f4c23fbb031 | 3,655,551 |
from typing import Tuple
from typing import Dict
def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]:
""" Returns a tuple of valid and invalid metadata values. """
if not metadata:
return {}, {}
valid_values = {}
invalid_values = {}
for m in metadata:
key, value = m.split("=", 1)
if key in supported_metadata_keys:
valid_values[key] = value
else:
invalid_values[key] = value
return valid_values, invalid_values | fbd7affaa8743d6c45bb2a02067d737dac990eb4 | 3,655,552 |
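A usage sketch, assuming the function above is in scope; supported_metadata_keys is defined elsewhere in the original module, so a small placeholder set is assumed here.
supported_metadata_keys = {"owner", "env"}  # placeholder assumption
valid, invalid = process_metadata(["owner=alice", "env=prod", "foo=bar"])
print(valid)    # {'owner': 'alice', 'env': 'prod'}
print(invalid)  # {'foo': 'bar'}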
def rightOfDeciSeperatorToDeci(a):
"""This function only convert value at the right side of decimal seperator to decimal"""
deciNum = 0
for i in range(len(a)):
deciNum += (int(a[i]))*2**-(i+1)
return deciNum | 14cfd187758836d329ac4778a30167ddece0f2a0 | 3,655,553 |
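A small example, assuming the function above is in scope: the fractional bits "101" represent 0.5 + 0.125.
print(rightOfDeciSeperatorToDeci("101"))  # 0.625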
import torch
def conv(input, weight):
"""
Returns the convolution of input and weight tensors,
where input contains sequential data.
The convolution is along the sequence axis.
input is of size [batchSize, inputDim, seqLength]
"""
output = torch.nn.functional.conv1d(input=input, weight=weight)
return output | e213be11c423ff63a1ebffda55331298fcf53443 | 3,655,554 |
import torch
def irr_repr(order, alpha, beta, gamma, dtype = None):
"""
irreducible representation of SO3
- compatible with compose and spherical_harmonics
"""
cast_ = cast_torch_tensor(lambda t: t)
dtype = default(dtype, torch.get_default_dtype())
alpha, beta, gamma = map(cast_, (alpha, beta, gamma))
return wigner_d_matrix(order, alpha, beta, gamma, dtype = dtype) | ff054a05c2d79a4dcfc903116cefdfce4fa56c8f | 3,655,555 |
from typing import List
from typing import Optional
def label_to_span(labels: List[str],
scheme: Optional[str] = 'BIO') -> dict:
"""
convert labels to spans
:param labels: a list of labels
:param scheme: labeling scheme, in ['BIO', 'BILOU'].
    :return: labeled spans, a dict mapping (start_idx, end_idx) tuples to labels
"""
assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
labeled_spans = dict()
i = 0
while i < len(labels):
if labels[i] == 'O' or labels[i] == 'ABS':
i += 1
continue
else:
if scheme == 'BIO':
if labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] == 'I':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
i += 1
# this should not happen
elif labels[i][0] == 'I':
i += 1
elif scheme == 'BILOU':
if labels[i][0] == 'U':
start = i
end = i + 1
lb = labels[i][2:]
labeled_spans[(start, end)] = lb
i += 1
elif labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] != 'L':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
break
i += 1
else:
i += 1
return labeled_spans | 01e3a1f3d72f8ec0b1cfa2c982fc8095c06c09f8 | 3,655,556 |
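A BIO example, assuming the function above is in scope; the labels are illustrative.
labels = ["B-PER", "I-PER", "O", "B-LOC", "O"]
print(label_to_span(labels))  # {(0, 2): 'PER', (3, 4): 'LOC'}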
from typing import Optional
def get_storage_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageAccountResult:
"""
The storage account.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storage/v20160501:getStorageAccount', __args__, opts=opts, typ=GetStorageAccountResult).value
return AwaitableGetStorageAccountResult(
access_tier=__ret__.access_tier,
creation_time=__ret__.creation_time,
custom_domain=__ret__.custom_domain,
encryption=__ret__.encryption,
id=__ret__.id,
kind=__ret__.kind,
last_geo_failover_time=__ret__.last_geo_failover_time,
location=__ret__.location,
name=__ret__.name,
primary_endpoints=__ret__.primary_endpoints,
primary_location=__ret__.primary_location,
provisioning_state=__ret__.provisioning_state,
secondary_endpoints=__ret__.secondary_endpoints,
secondary_location=__ret__.secondary_location,
sku=__ret__.sku,
status_of_primary=__ret__.status_of_primary,
status_of_secondary=__ret__.status_of_secondary,
tags=__ret__.tags,
type=__ret__.type) | c818e801d152f2b11bedac42bcefe322b94ab16e | 3,655,557 |
import rdflib
def format_and_add(graph, info, relation, name):
"""
    input: graph and three strings
function formats the strings and adds to the graph
"""
info = info.replace(" ", "_")
name = name.replace(" ", "_")
inf = rdflib.URIRef(project_prefix + info)
rel = rdflib.URIRef(project_prefix + relation)
nm = rdflib.URIRef(project_prefix + name)
graph.add((inf, rel, nm))
return None | abbe87b923d4ac37262e391a7d9a878b65e4ff41 | 3,655,558 |
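A usage sketch, assuming rdflib is installed and the function above (with its import) is in scope; project_prefix is a module-level namespace string in the original code, so a placeholder value is assumed here.
project_prefix = "http://example.org/project/"  # placeholder assumption
g = rdflib.Graph()
format_and_add(g, "New York", "located_in", "United States")
print(len(g))  # 1 triple added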
import os
def get_dataset(dir, batch_size, num_epochs, reshape_size, padding='SAME'):
"""Reads input data num_epochs times. AND Return the dataset
Args:
        dir: Directory containing the input TFRecord files.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
padding: if 'SAME' , have ceil(#samples / batch_size) * epoch_nums batches
if 'VALID', have floor(#samples / batch_size) * epoch_nums batches
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
This function creates a one_shot_iterator, meaning that it will only iterate
over the dataset once. On the other hand there is no special initialization
required.
"""
if not num_epochs:
num_epochs = None
filenames = [os.path.join(dir, i) for i in os.listdir(dir)]
with tf.name_scope('input'):
# TFRecordDataset opens a protobuf and reads entries line by line
# could also be [list, of, filenames]
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.repeat(num_epochs)
# map takes a python function and applies it to every sample
dataset = dataset.map(decode)
dataset = dataset.map(extract)
dataset = dataset.map(cast_type)
dataset = dataset.map(augment)
dataset = dataset.map(normalize)
dataset = dataset.map(set_parameter(reshape, reshape_size=reshape_size))
# the parameter is the queue size
dataset = dataset.shuffle(1000 + 3 * batch_size)
dataset = dataset.batch(batch_size)
return dataset | cdb47a4a4fcbe01f53a878c95755a711906224b1 | 3,655,559 |
def to_log_space(p:float, bounds: BOUNDS_TYPE):
""" Interprets p as a point in a rectangle in R^2 or R^3 using Morton space-filling curve
:param bounds [ (low,high), (low,high), (low,high) ] defaults to unit cube
:param dim Dimension. Only used if bounds are not supplied.
Very similar to "to_space" but assumes speed varies with logarithm
"""
assert 0 <= p <= 1
dim = len(bounds)
us = list(reversed(ZCurveConventions().to_cube(zpercentile=p, dim=dim))) # 0 < us[i] < 1
return [to_log_space_1d(u, low=b[0], high=b[1]) for u, b in zip(us, bounds)] | 2ab53687481100bda229456b8aa5fad4d8ef817d | 3,655,560 |
import numpy as np
import pandas as pd
def rsi_tradingview(ohlc: pd.DataFrame, period: int = 14, round_rsi: bool = True):
""" Implements the RSI indicator as defined by TradingView on March 15, 2021.
The TradingView code is as follows:
//@version=4
study(title="Relative Strength Index", shorttitle="RSI", format=format.price, precision=2, resolution="")
len = input(14, minval=1, title="Length")
src = input(close, "Source", type = input.source)
up = rma(max(change(src), 0), len)
down = rma(-min(change(src), 0), len)
rsi = down == 0 ? 100 : up == 0 ? 0 : 100 - (100 / (1 + up / down))
plot(rsi, "RSI", color=#8E1599)
band1 = hline(70, "Upper Band", color=#C0C0C0)
band0 = hline(30, "Lower Band", color=#C0C0C0)
fill(band1, band0, color=#9915FF, transp=90, title="Background")
:param ohlc:
:param period:
:param round_rsi:
:return: an array with the RSI indicator values
"""
delta = ohlc["close"].diff()
up = delta.copy()
up[up < 0] = 0
up = pd.Series.ewm(up, alpha=1/period).mean()
down = delta.copy()
down[down > 0] = 0
down *= -1
down = pd.Series.ewm(down, alpha=1/period).mean()
rsi = np.where(up == 0, 0, np.where(down == 0, 100, 100 - (100 / (1 + up / down))))
return np.round(rsi, 2) if round_rsi else rsi | 5b9d96d5e174d6534c2d32a3ece83a5fb09b28ba | 3,655,561 |
import numpy as np
def bin_by(x, y, nbins=30):
"""Bin x by y, given paired observations of x & y.
Returns the binned "x" values and the left edges of the bins."""
bins = np.linspace(y.min(), y.max(), nbins+1)
# To avoid extra bin for the max value
bins[-1] += 1
indicies = np.digitize(y, bins)
output = []
    for i in range(1, len(bins)):
output.append(x[indicies==i])
# Just return the left edges of the bins
bins = bins[:-1]
return output, bins | ff0f9e79f3561cabf2a6498e31a78836be38bfb3 | 3,655,562 |
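An example, assuming NumPy and the function above are in scope; the data are illustrative.
y = np.array([0.1, 0.2, 0.5, 0.9, 1.5, 1.9])
x = np.array([10, 20, 30, 40, 50, 60])
binned_x, left_edges = bin_by(x, y, nbins=2)
print(left_edges)                      # [0.1 1. ]
print([b.tolist() for b in binned_x])  # [[10, 20, 30, 40], [50, 60]]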
import pandas as pd
def calc_stats_with_cumsum(df_tgt, list_tgt_status, dict_diff, calc_type=0):
""" Calculate statistics with cumulative sum of target status types. \n
"dict_diff" is dictionaly of name key and difference value. ex) {"perweek": 7, "per2week": 14} \n
calc_type=0: calculate for each simulation result. \n
calc_type=1: calculate for each daycount result. """
# Prepare front side of dataframe.
if calc_type == 0:
sim_num = len(df_tgt[list_tgt_status[0]].columns)
output_df = pd.DataFrame([i for i in range(sim_num)], columns=["sim_num"])
else:
output_df = df_tgt.iloc[:, :2].copy()
# Calculate statistics with cumulative sum.
for one_status in list_tgt_status:
# Extract target status data.
one_tgt_df = df_tgt[one_status]
# Calculate the days difference in dict_diff.
dict_df_diff = {}
for one_key, one_diff in dict_diff.items():
temp_df_diff = one_tgt_df.cumsum().diff(one_diff)
temp_df_diff.iloc[one_diff-1, :] = one_tgt_df.cumsum().iloc[one_diff-1, :]
dict_df_diff[one_key] = temp_df_diff
if calc_type == 0:
# Each simulation.
output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.T.mean(axis=1).values
output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.T.std(axis=1).values
output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.T.min(axis=1).values
output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.T.quantile(q=0.25, axis=1).values
output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.T.median(axis=1).values
output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.T.quantile(q=0.75, axis=1).values
output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.T.max(axis=1).values
for one_key, one_diff in dict_diff.items():
output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].T.mean(axis=1).values
output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].T.std(axis=1).values
output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].T.min(axis=1).values
output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.25, axis=1).values
output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].T.median(axis=1).values
output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.75, axis=1).values
output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].T.max(axis=1).values
else:
# Each day.
output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.mean(axis=1)
output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.std(axis=1)
output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.min(axis=1)
output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.quantile(q=0.25, axis=1)
output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.median(axis=1)
output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.quantile(q=0.75, axis=1)
output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.max(axis=1)
for one_key, one_diff in dict_diff.items():
# Note: Processing is well done, but numpy warning occurs.
# Note: Because all the data of first few days in "perweek" and "per2week" become np.NaN.
output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].mean(axis=1)
output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].std(axis=1)
output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].min(axis=1)
output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.25, axis=1)
output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].median(axis=1)
output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.75, axis=1)
output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].max(axis=1)
return output_df | 2dcdd95b8723f250a24afae548b8fd4ce9b5f51c | 3,655,563 |
def _normalize_handler_method(method):
"""Transforms an HTTP method into a valid Python identifier."""
return method.lower().replace("-", "_") | aad23dba304ba39708e4415de40019479ccf0195 | 3,655,564 |
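A tiny example, assuming the function above is in scope.
print(_normalize_handler_method("GET"))             # "get"
print(_normalize_handler_method("MAKE-DIRECTORY"))  # "make_directory"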
def getContentType(the_type):
"""
Get the content type based on the type name which is in settings
:param the_type:
:return:
"""
if the_type not in settings.XGDS_MAP_SERVER_JS_MAP:
return None
the_model_name = settings.XGDS_MAP_SERVER_JS_MAP[the_type]['model']
splits = the_model_name.split('.')
content_type = ContentType.objects.get(app_label=splits[0], model=splits[1])
return content_type | 25031eb0dce8fe7828f94bdbc99d5c574f0e5ea6 | 3,655,565 |
import scipy
import math
import numpy
def calculateGravityAcceleration(stateVec, epoch, useGeoid):
""" Calculate the acceleration due to gravtiy acting on the satellite at
a given state (3 positions and 3 velocities). Ignore satellite's mass,
i.e. use a restricted two-body problem.
Arguments
----------
numpy.ndarray of shape (1,6) with three Cartesian positions and three
velocities in an inertial reference frame in metres and metres per
second, respectively.
epoch - datetime corresponding to the UTC epoch at which the rate of change
is to be computed.
useGeoid - bool, whether to compute the gravity by using EGM geopotential
expansion (True) or a restricted two-body problem (False).
Returns
----------
numpy.ndarray of shape (1,3) with three Cartesian components of the
acceleration in m/s2 given in an inertial reference frame.
"""
if useGeoid:
" Compute geocentric co-latitude, longitude & radius. "
colatitude,longitude,r = calculateGeocentricLatLon(stateVec, epoch)
" Find the gravitational potential at the desired point. "
# See Eq. 1 in Cunningham (1996) for the general form of the geopotential expansion.
gravitationalPotential = 0.0 # Potential of the gravitational field at the stateVec location.
for degree in range(0, MAX_DEGREE+1): # Go through all the desired orders and compute the geoid corrections to the sphere.
temp = 0. # Contribution to the potential from the current degree and all corresponding orders.
legendreCoeffs = scipy.special.legendre(degree) # Legendre polynomial coefficients corresponding to the current degree.
for order in range(degree+1): # Go through all the orders corresponding to the currently evaluated degree.
                if abs(colatitude - math.pi/2.) <= 1E-16 or abs(colatitude - 3.*math.pi/2.) <= 1E-16: # We're at the equator, cos(colatitude) will be zero and things will break.
temp += legendreCoeffs[order] * 1.0 * (Ccoeffs[degree][order]*math.cos( order*longitude ) + Scoeffs[degree][order]*math.sin( order*longitude ))
else:
temp += legendreCoeffs[order] * math.cos(colatitude) * (Ccoeffs[degree][order]*math.cos( order*longitude ) + Scoeffs[degree][order]*math.sin( order*longitude ))
gravitationalPotential += math.pow(EarthRadius/r, degree) * temp # Add the contribution from the current degree.
gravitationalPotential *= GM/r # Final correction (*GM for acceleration, /r to get r^(n+1) in the denominator).
" Compute the acceleration due to the gravity potential at the given point. "
# stateVec is defined w.r.t. Earth's centre of mass, so no need to account
# for the geoid shape here.
gravityAcceleration = gravitationalPotential/r* (-1.*stateVec[:3]/r) # First divide by the radius to get the acceleration value, then get the direction (towards centre of the Earth).
else:
r = numpy.linalg.norm(stateVec[:3]) # Earth-centred radius.
gravityAcceleration = GM/(r*r) * (-1.*stateVec[:3]/r) # First compute the magnitude, then get the direction (towards centre of the Earth).
return gravityAcceleration | a2ea0ff1c8feb9f0a678911130e3ca6e96838b7c | 3,655,566 |
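# Hedged sanity check (not part of the original snippet): in the restricted two-body
# branch the magnitude equals GM / r**2 and the epoch argument is ignored. GM is assumed
# to be a module-level constant; Earth's standard GM in m^3/s^2 is used here.
GM = 3.986004418e14
state = numpy.array([7.0e6, 0.0, 0.0, 0.0, 7.5e3, 0.0])
acc = calculateGravityAcceleration(state, None, useGeoid=False)
assert abs(numpy.linalg.norm(acc) - GM / 7.0e6**2) < 1e-9   # ~8.13 m/s^2, towards the origin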
import math

import numpy as np
def points_on_line(r0, r1, spacing):
"""
Coordinates of points spaced `spacing` apart between points `r0` and `r1`.
The dimensionality is inferred from the length of the tuples `r0` and `r1`,
while the specified `spacing` will be an upper bound to the actual spacing.
"""
dim = len(r0)
v = np.array(r1) - np.array(r0)
length = np.linalg.norm(v)
steps = math.ceil(1.0 * length / spacing) + 1
points = np.zeros((steps, dim))
    for i in range(dim):
points[:, i] = np.linspace(r0[i], r1[i], steps)
return points | eb2795cba55566823632c75b7a72f34731b5e36e | 3,655,567 |
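# Usage sketch (not part of the original snippet): three points at most 0.5 apart
# between (0, 0) and (0, 1).
pts = points_on_line((0.0, 0.0), (0.0, 1.0), 0.5)
# pts rows: (0.0, 0.0), (0.0, 0.5), (0.0, 1.0)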
from flask import Response


def index() -> Response:
"""
Return application index.
"""
return APP.send_static_file("index.html") | 37d299ec548fe4f83d8f55f063e3bf9f5fb64c4e | 3,655,568 |
def compare_files(og_maxima,new_maxima, compare_file, until=100, divisor=1000):
"""
given input of the maxima of a graph, compare it to the maxima from data100.txt
maxima will be a series of x,y coordinates corresponding to the x,y values of a maximum from a file.
    First see if there is a maximum with the same x value as data100.txt; if there is not, expand the x value range
until a maximum is found. Find out what this dx is for the new file.
Note do it for all the peaks of data100.txt at once, so that if it finds a peak for the 2nd peak of data100.txt,
it doesn't also assign this to the first peak as well.
kewyword arguments until and divisor:
for the dx loop the loop will increase dx from 0 until until/divisor in steps of 1/divisor
eg for default values until=100 and divisor=1000,
it will increase dx from 0 until 100/1000 (=0.1) in steps of 1/1000 (=0.001)
changing these arguments will lead to more or less peak matching, which could
affect the results of the calculation significantly.
"""
if compare_file == 'data100.txt':
return None
    # Count the number of peak matches between the two files so the totals
    # can be compared at the end.
    number_of_matches = 0
# Initiate two lists to contain all the dx and dy values for each peak that
# is matched by the code.
dx_values = []
dy_values = []
# Loop through the original maxima list (supplied as an argument)
# and also loop through the maxima from the file being compared.
for og_idx,og_val in enumerate(og_maxima.T[0]):
for idx,val in enumerate(new_maxima.T[0]):
#this will loop dx from 0 to (until)/divisor in steps of 1/divisor
for x in range(until+1):
dx = x/divisor
# For the current value of dx see if there is a matching
# peak between the data100.txt file and the file being compared.
# There is a match if the val from the compare_file is within the range
# of the original peak x value +/- the dx value.
if og_val - dx <= val <= og_val + dx:
#if there is a match print some logging information to the console.
print(f"Peak Match : index {og_idx} from data100.txt and {idx} from {compare_file}")
print(f"values are {og_val} and {val} respectively")
# iterate the number of peak matches between the two files being compared.
number_of_matches+=1
# append the current dx value to our running list which will keep track
# of the dx values for all the matched peaks
dx_values.append(dx)
# Get the absolute value of the difference in y values (dy)
dy = abs(og_maxima.T[1][og_idx] - new_maxima.T[1][idx])
dy_values.append(dy)
#breaks us out of the "for x in range" loop
break
# If the for loop (for x in range ...) isn't terminated by a break statement
# I.E. we didn't get a match
else:
"move onto next peak in new_maxima"
continue
# If the for loop does get terminated by the break statement
# I.E. we get a match
"""compare next peak in og_maxima, IE break the new_maxima loop and move onto
next in the original maxima list"""
break
# Calculate the absolute value of the difference in number of peaks
# between the two data files
different_no_peaks = abs(len(new_maxima) - len(og_maxima))
return [dx_values, dy_values, number_of_matches, different_no_peaks] | 86fe2ffd02785d41284b8edfef44d0dc0e097c90 | 3,655,569 |
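# Usage sketch (not part of the original snippet; the file name is hypothetical and only
# used for logging): the first peak of the compared file lines up exactly (dx = 0.0),
# the second has no counterpart within 0.1, so exactly one match is reported.
import numpy as np

og = np.array([[1.00, 5.0], [2.00, 3.0]])       # (x, y) maxima from data100.txt
new = np.array([[1.00, 4.5], [3.50, 2.0]])      # (x, y) maxima from the compared file
dx_vals, dy_vals, n_matches, peak_diff = compare_files(og, new, "data200.txt")
# dx_vals == [0.0], dy_vals == [0.5], n_matches == 1, peak_diff == 0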
from datetime import datetime as _datetime
def parseDatetimetz(string, local=True):
"""Parse the given string using :func:`parse`.
Return a :class:`datetime.datetime` instance.
"""
y, mo, d, h, m, s, tz = parse(string, local)
s, micro = divmod(s, 1.0)
micro = round(micro * 1000000)
if tz:
offset = _tzoffset(tz, None) / 60
_tzinfo = tzinfo(offset)
else:
_tzinfo = None
return _datetime(y, mo, d, int(h), int(m), int(s), int(micro), _tzinfo) | ce95f42f568b50ffcdc0084dc659a1d5fd0233ff | 3,655,570 |
from astropy import stats


def median_ratio_flux(spec, smask, ispec, iref, nsig=3., niter=5, **kwargs):
""" Calculate the median ratio between two spectra
Parameters
----------
spec
smask:
True = Good, False = Bad
ispec
iref
nsig
niter
kwargs
Returns
-------
med_scale : float
Median of reference spectrum to input spectrum
"""
# Setup
fluxes, sigs, wave = unpack_spec(spec)
# Mask
okm = smask[iref,:] & smask[ispec,:]
# Insist on positive values
okf = (fluxes[iref,:] > 0.) & (fluxes[ispec,:] > 0)
allok = okm & okf
# Ratio
med_flux = fluxes[iref,allok] / fluxes[ispec,allok]
# Clip
mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, maxiters=niter, **kwargs)
# Return
return med_scale | 28872a548ce7569f17f155242aaf4377bf0c1b63 | 3,655,571 |
def get_tags_from_event(request_event):
    """List of tags
    Arguments:
        request_event {dict} -- Lambda event payload
Returns:
list -- List of AWS tags for use in a CFT
"""
return [
{
"Key": "OwnerContact",
"Value": request_event['OwnerContact']
}
] | e7a0f7da62a4904dbfb716c57b6811053aff3497 | 3,655,572 |
from typing import List
def _verify(symbol_table: SymbolTable, ontology: _hierarchy.Ontology) -> List[Error]:
"""Perform a battery of checks on the consistency of ``symbol_table``."""
errors = _verify_there_are_no_duplicate_symbol_names(symbol_table=symbol_table)
if len(errors) > 0:
return errors
errors.extend(
_verify_with_model_type_for_classes_with_at_least_one_concrete_descendant(
symbol_table=symbol_table
)
)
errors.extend(
_verify_all_the_function_calls_in_the_contracts_are_valid(
symbol_table=symbol_table
)
)
errors.extend(
_verify_all_non_optional_properties_are_initialized_in_the_constructor(
symbol_table=symbol_table
)
)
errors.extend(
_verify_orders_of_constructors_arguments_and_properties_match(
symbol_table=symbol_table
)
)
errors.extend(
_verify_all_argument_references_occur_in_valid_context(
symbol_table=symbol_table
)
)
errors.extend(_verify_constraints_and_constraintrefs(symbol_table=symbol_table))
errors.extend(_verify_description_rendering_with_smoke(symbol_table=symbol_table))
errors.extend(_verify_only_simple_type_patterns(symbol_table=symbol_table))
if len(errors) > 0:
return errors
_assert_interfaces_defined_correctly(symbol_table=symbol_table, ontology=ontology)
_assert_all_class_inheritances_defined_an_interface(symbol_table=symbol_table)
_assert_self_not_in_concrete_descendants(symbol_table=symbol_table)
return errors | da9dd12f01107a0c0ea1a8b2df1aa2fb543391ab | 3,655,573 |
def gsl_eigen_symmv_alloc(*args, **kwargs):
"""gsl_eigen_symmv_alloc(size_t n) -> gsl_eigen_symmv_workspace"""
return _gslwrap.gsl_eigen_symmv_alloc(*args, **kwargs) | 54384bfa9787b9a337ad3b9e2d9ea211769238d4 | 3,655,574 |
def add_poll_answers(owner, option):
"""
Add poll answer object. Matching user and option is considered same.
:param owner: User object.
:param option: Chosen poll option.
:return: Poll answer object, Boolean (true, if created).
"""
'''
owner = models.ForeignKey(User, related_name='poll_answers', on_delete=models.CASCADE)
answer = models.ForeignKey(PollOption, related_name='answers', on_delete=models.CASCADE)
'''
created = False
try:
a = PollAnswer.objects.get(owner=owner, answer=option)
    except PollAnswer.DoesNotExist:
        a = PollAnswer(owner=owner, answer=option)
        a.save()
        created = True
return a, created | ac667fbfb47aeb7d2450a3d698b0b678c3bdfdbc | 3,655,575 |
import sys

import numpy as np
from osgeo import gdal


def calculate_rrfdi(red_filename, nir_filename):
"""
A function to calculate the Normalised Difference Vegetation Index
from red and near infrarred reflectances. The reflectance data ought to
be present on two different files, specified by the varaibles
`red_filename` and `nir_filename`. The file format ought to be
recognised by GDAL
"""
g_red = gdal.Open ( red_filename )
red = g_red.ReadAsArray()
g_nir = gdal.Open ( nir_filename )
nir = g_nir.ReadAsArray()
if ( g_red.RasterXSize != g_nir.RasterXSize ) or \
( g_red.RasterYSize != g_nir.RasterYSize ):
print "ERROR: Input datasets do't match!"
print "\t Red data shape is %dx%d" % ( red.shape )
print "\t NIR data shape is %dx%d" % ( nir.shape )
sys.exit ( -1 )
passer = True
rrfdi = np.where ( passer, (1.*red - 1.*nir ) / ( 1.*nir + 1.*red ), -999 )
return rrfdi*(-1) | 3b8f4d7eadceb38b7f874bfe0a56827f7a8aab09 | 3,655,576 |
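# Illustration of the core index formula on synthetic arrays (not part of the original
# snippet; the real function reads the red and NIR bands from GDAL rasters):
red = np.array([[0.10, 0.20]])
nir = np.array([[0.50, 0.40]])
index = -1 * (red - nir) / (nir + red)          # same value calculate_rrfdi returns
# index == [[0.666..., 0.333...]]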
import sys
def retry_on_failure(retries=NO_RETRIES):
"""Decorator which runs a test function and retries N times before
actually failing.
"""
def logfun(exc):
print("%r, retrying" % exc, file=sys.stderr) # NOQA
return retry(exception=AssertionError, timeout=None, retries=retries,
logfun=logfun) | cfd1427001036597e99cb27b3ce16b4edcfae8ba | 3,655,577 |
import argparse
def command_line():
"""Generate an Argument Parser object to control the command line options
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-w", "--webdir", dest="webdir",
help="make page and plots in DIR", metavar="DIR",
default=None)
parser.add_argument("-s", "--samples", dest="samples",
help="Posterior samples hdf5 file", nargs='+',
default=None)
parser.add_argument("--labels", dest="labels",
help="labels used to distinguish runs", nargs='+',
default=None)
parser.add_argument("--prior", dest="prior",
choices=["population", "default", "both"],
default="both",
help=("Prior to use when calculating source "
"classification probabilities"))
parser.add_argument("--plot", dest="plot",
help="name of the plot you wish to make",
default="bar", choices=["bar", "mass_1_mass_2"])
return parser | f1463f291cc99acc66cb1fb46d1be0a7ef60e9ca | 3,655,578 |
import re
def strip_price(header_list):
"""input a list of tag-type values and return list of strings with surrounding html characters removed"""
match_obs = []
    regex = r'\$(((\d+)\.\d+)|(\d+))'
    string_list = []
    for item in range(len(header_list)):
        match_obs.append(re.search(regex, str(header_list[item])))
    for i in range(len(match_obs)):
        string_list.append(match_obs[i].group(1))
return string_list | 7b3d90416e44f8aa61ababc0e7b68f82ae754413 | 3,655,579 |
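# Usage sketch (not part of the original snippet): the surrounding tags are dropped
# and only the numeric part of each price is kept.
tags = ['<span class="price">$12.99</span>', '<div>$5</div>']
strip_price(tags)    # -> ['12.99', '5']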
import functools
def module(input, output, version):
"""A decorator which turn a function into a module"""
def decorator(f):
class Wrapper(Module):
def __init__(self):
super().__init__(input, output, version)
@property
def name(self):
"""The module's name"""
return f.__name__
def execute(self, *args, **kwargs):
return f(*args, **kwargs)
wrapper = Wrapper()
return functools.wraps(f)(wrapper)
return decorator | b7d5afcaa8fa52411024f84f979891d19ccf60c0 | 3,655,580 |
def compile_modules_to_ir(
result: BuildResult,
mapper: genops.Mapper,
compiler_options: CompilerOptions,
errors: Errors,
) -> ModuleIRs:
"""Compile a collection of modules into ModuleIRs.
The modules to compile are specified as part of mapper's group_map.
Returns the IR of the modules.
"""
deser_ctx = DeserMaps({}, {})
modules = {}
# Process the graph by SCC in topological order, like we do in mypy.build
for scc in sorted_components(result.graph):
scc_states = [result.graph[id] for id in scc]
trees = [st.tree for st in scc_states if st.id in mapper.group_map and st.tree]
if not trees:
continue
fresh = all(id not in result.manager.rechecked_modules for id in scc)
if fresh:
load_scc_from_cache(trees, result, mapper, deser_ctx)
else:
scc_ir = compile_scc_to_ir(trees, result, mapper, compiler_options, errors)
modules.update(scc_ir)
return modules | e2ea8a87a1ed2450e4c8ed99c7ca8a3142568f45 | 3,655,581 |
def minutes_to_restarttime(minutes) :
"""
converts an int meaning Minutes after midnight into a
restartTime string understood by the bos command
"""
if minutes == -1 :
return "never"
pod = "am"
if minutes > 12*60 :
pod = "pm"
minutes -= 12*60
time = "%d:%02d %s" % (minutes / 60, minutes % 60, pod)
return time | 6d7807cebb7a474553dda8eadfd27e5ce7b2a657 | 3,655,582 |
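# Usage sketch (not part of the original snippet):
minutes_to_restarttime(-1)     # -> "never"
minutes_to_restarttime(90)     # -> "1:30 am"
minutes_to_restarttime(810)    # -> "1:30 pm"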
import numpy as np
from tqdm import tqdm
def ccm_test(x, y,emb_dim = "auto", l_0 = "auto", l_1 = "auto", tau=1, n=10,mean_num = 10,max_dim = 10):
"""
estimate x from y to judge x->y cause
:param x:
:param y:
:param l_0:
:param l_1:
:param emb_dim:
:param tau:
:param n:
:return:
"""
if emb_dim == "auto":
emb_dim = decide_dim(x,y)
if l_0 == "auto":
l_0 = int(np.ceil((len(x) - emb_dim + 1) * 0.1))
if l_1 == "auto":
l_1 = int(np.ceil((len(x) - emb_dim + 1) * 0.9))
ys = twin_surrogate(y, emb_dim,num=n)
raw_rhos = []
rhos = []
max_length = len(ys[0])
for i in tqdm(range(n)):
mean = 0
for j in range(mean_num):
rho_0, _ = estimate_using_bootstrap(x, y, length=l_0, emb_dim=emb_dim, tau=tau)
rho_1, _ = estimate_using_bootstrap(x, y, length=l_1, emb_dim=emb_dim, tau=tau)
rho_s_0, _ = estimate_from_emb_random(x, ys[i], length=l_0, emb_dim=emb_dim, tau=tau, max_length = max_length)
rho_s_1, _ = estimate_from_emb_random(x, ys[i], length=l_1, emb_dim=emb_dim, tau=tau, max_length = max_length)
raw_rhos.append([rho_0, rho_1, rho_s_0, rho_s_1])
mean += rho_1 -rho_0 -(rho_s_1 - rho_s_0 )
rhos.append(mean/mean_num)
rhos = np.array(rhos)
p = 1 - (len(rhos[rhos>0]) / n)
return {
"p_value" :p,
"rhos" :rhos,
"raw_rhos":raw_rhos
} | c03a05e62df36910ea05e361c9683b60befc1b9c | 3,655,583 |
import os.path as op

import mne
import numpy as np


def make_indiv_spacing(subject, ave_subject, template_spacing, subjects_dir):
"""
Identifies the suiting grid space difference of a subject's volume
source space to a template's volume source space, before a planned
morphing takes place.
Parameters:
-----------
subject : str
Subject ID.
ave_subject : str
Name or ID of the template brain, e.g., fsaverage.
template_spacing : float
Grid spacing used for the template brain.
subjects_dir : str
Path to the subjects directory.
    Returns:
    --------
    indiv_spacing : float
        Grid spacing for the subject that corresponds to the template spacing,
        scaled by the ratio of the subjects' inner-skull surface extents.
    """
fname_surf = op.join(subjects_dir, subject, 'bem', 'watershed', '%s_inner_skull_surface' % subject)
fname_surf_temp = op.join(subjects_dir, ave_subject, 'bem', 'watershed', '%s_inner_skull_surface' % ave_subject)
surf = mne.read_surface(fname_surf, return_dict=True, verbose='ERROR')[-1]
surf_temp = mne.read_surface(fname_surf_temp, return_dict=True, verbose='ERROR')[-1]
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
mins_temp = np.min(surf_temp['rr'], axis=0)
maxs_temp = np.max(surf_temp['rr'], axis=0)
# Check which dimension (x,y,z) has greatest difference
diff = (maxs - mins)
diff_temp = (maxs_temp - mins_temp)
# print additional information
# for c, mi, ma, md in zip('xyz', mins, maxs, diff):
# logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm'
# % (c, mi, ma, md))
# for c, mi, ma, md in zip('xyz', mins_temp, maxs_temp, diff_temp):
# logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm'
# % (c, mi, ma, md))
prop = (diff / diff_temp).mean()
indiv_spacing = (prop * template_spacing)
print(" '%s' individual-spacing to '%s'[%.2f] is: %.4fmm" % (
subject, ave_subject, template_spacing, indiv_spacing))
return indiv_spacing | cbe5120093fdf78913c2386820d3388aca0724d1 | 3,655,584 |
def sqlpool_blob_auditing_policy_update(
cmd,
instance,
state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
storage_account_subscription_id=None,
is_storage_secondary_key_in_use=None,
retention_days=None,
audit_actions_and_groups=None,
is_azure_monitor_target_enabled=None):
"""
Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance.
"""
_audit_policy_update(cmd, instance, state, storage_account, storage_endpoint, storage_account_access_key,
storage_account_subscription_id, is_storage_secondary_key_in_use, retention_days,
audit_actions_and_groups, is_azure_monitor_target_enabled)
return instance | e99013545172eb03ad5dddeefdb0b36b7bb2edd7 | 3,655,585 |
from subprocess import PIPE, Popen
from typing import Optional
import os

import cij


def from_system() -> Optional[Config]:
"""
Config-factory; producing a Config based on environment variables and when
environment variables aren't set, fall back to the ``cij_root`` helper.
"""
conf = Config()
# Setup configuration using environment variable definitions
paths_from_evars = cij.paths_from_env(
"CIJ",
[f.upper() for f in CFG_FIELDS]
)
missing = False
for key, value in paths_from_evars.items():
if value is None:
missing = True
break
setattr(conf, key.lower(), value)
if not missing:
return conf
# Setup configuration using 'cij_root'
with Popen(["cij_root"], stdout=PIPE) as proc:
out, _ = proc.communicate()
if proc.returncode:
return None
cij_root = out.decode("utf-8").strip()
if not os.path.exists(cij_root):
return None
for field in CFG_FIELDS:
setattr(conf, field, os.path.join(cij_root, field))
return conf | 6fb093d33ab44851b8027642b26f0e116b17af56 | 3,655,586 |
def format_search_filter(model_fields):
"""
Creates an LDAP search filter for the given set of model
fields.
"""
    ldap_fields = convert_model_fields_to_ldap_fields(model_fields)
    ldap_fields["objectClass"] = settings.LDAP_AUTH_OBJECT_CLASS
    search_filters = import_func(settings.LDAP_AUTH_FORMAT_SEARCH_FILTERS)(ldap_fields)
    return "(&{})".format("".join(search_filters)) | b6c5c17b566c583a07ef5e9f3ec61cb868f6f8ab | 3,655,587
def multiprocess(func=None, pycsp_host='', pycsp_port=None):
""" @multiprocess(pycsp_host='', pycsp_port=None)
@multiprocess decorator for making a function into a CSP MultiProcess factory.
Each generated CSP process is implemented as a single OS process.
All objects and variables provided to multiprocesses through the
parameter list must support pickling.
Usage:
>>> @multiprocess
>>> def filter(dataIn, dataOut, tag, debug=False):
>>> pass # perform filtering
>>>
>>> P = filter(A.reader(), B.writer(), "42", debug=True)
or
>>> @multiprocess(pycsp_host="localhost", pycsp_port=9998)
>>> def filter(dataIn, dataOut, tag, debug=False):
>>> pass # perform filtering
>>>
>>> P = filter(A.reader(), B.writer(), "42", debug=True)
The CSP MultiProcess factory returned by the @multiprocess decorator:
func(*args, **kwargs)
"""
if func:
def _call(*args, **kwargs):
return MultiProcess(func, *args, **kwargs)
_call.__name__ = func.__name__
return _call
else:
def wrap_process(func):
def _call(*args, **kwargs):
kwargs['pycsp_host']= pycsp_host
kwargs['pycsp_port']= pycsp_port
return MultiProcess(func, *args, **kwargs)
_call.__name__ = func.__name__
return _call
return wrap_process | ec1d5c14a7eb60af0cd351c15c1ec0724e577fb0 | 3,655,588 |
import numpy as np


def normalize_img(img):
"""
normalize image (caffe model definition compatible)
input: opencv numpy array image (h, w, c)
output: dnn input array (c, h, w)
"""
scale = 1.0
mean = [104,117,123]
img = img.astype(np.float32)
img = img * scale
img -= mean
img = np.transpose(img, (2, 0, 1))
return img | dac9ec8c942d70fb98f0b0989e9643f80dde5448 | 3,655,589 |
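# Usage sketch (not part of the original snippet): an HxWxC OpenCV-style image becomes
# a mean-subtracted CxHxW blob.
dummy = np.zeros((4, 4, 3), dtype=np.uint8)
blob = normalize_img(dummy)
# blob.shape == (3, 4, 4); blob[0] is -104.0 everywhere, blob[1] is -117.0, blob[2] is -123.0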
from typing import Any, List

import discord


def pages(lst: List[Any], n: int, title: str, *, fmt: str = "```%s```", sep: str = "\n") -> List[discord.Embed]:
# noinspection GrazieInspection
"""
Paginates a list into embeds to use with :class:disputils.BotEmbedPaginator
:param lst: the list to paginate
:param n: the number of elements per page
:param title: the title of the embed
:param fmt: a % string used to format the resulting page
:param sep: the string to join the list elements with
:return: a list of embeds
"""
l: List[List[str]] = group_list([str(i) for i in lst], n)
pgs = [sep.join(page) for page in l]
return [
discord.Embed(
title=f"{title} - {i + 1}/{len(pgs)}",
description=fmt % pg
) for i, pg in enumerate(pgs)
] | f8d9471f2d254b63754128a2e2762520f858edbd | 3,655,590 |
import re
def Substitute_Percent(sentence):
"""
Substitutes percents with special token
"""
sentence = re.sub(r'''(?<![^\s"'[(])[+-]?[.,;]?(\d+[.,;']?)+%(?![^\s.,;!?'")\]])''',
' @percent@ ', sentence)
return sentence | 61bc6970af09703ef018bfcc9378393241ae21ed | 3,655,591 |
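# Usage sketch (not part of the original snippet): the matched percentage is replaced
# by the padded ' @percent@ ' token.
Substitute_Percent("Sales rose 5.3% this year")
# -> 'Sales rose  @percent@  this year'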
import pandas as pd


def ready_df1(df):
"""
This function prepares the dataframe for EDA.
"""
df = remove_columns(df, columns=[ 'nitrogen_dioxide',
'nitrogen_dioxide_aqi',
'sulfur_dioxide',
'sulfur_dioxide_aqi',
'trioxygen',
'trioxygen_aqi',
'volatile',
'volatile_aqi',
])
df['fahrenheit'] = 9.0/5.0 * df['temperature'] + 32
df = df.drop(columns=['temperature'])
df = df.rename(index=str, columns={'fahrenheit':'temperature'})
df['carbon_monoxide'] = df['carbon_monoxide'].fillna(0).astype(int)
df['timestamp'] = pd.to_datetime(df['timestamp'])
return df | 3776c571d3eabb39ce27017ac1481e2bd469f68c | 3,655,592 |
from functools import partial, wraps


def _wrap(func, args, flip=True):
"""Return partial function with flipped args if flip=True
:param function func: Any function
:param args args: Function arguments
:param bool flip: If true reverse order of arguments.
:return: Returns function
:rtype: function
"""
@wraps(func)
def flippedfunc(*args):
return func(*args[::-1])
return partial(flippedfunc if flip else func, args) | 9ac5a814840f821260d46df64b60cd6d71185dbb | 3,655,593 |
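# Usage sketch (not part of the original snippet): with flip=True the stored argument
# ends up on the right-hand side of the eventual call.
from operator import sub

f = _wrap(sub, 5)       # partial over the flipped function with 5 pre-bound
f(2)                    # == sub(2, 5) == -3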
import numpy as np


def compute_kkt_optimality(g, on_bound):
"""Compute the maximum violation of KKT conditions."""
g_kkt = g * on_bound
free_set = on_bound == 0
g_kkt[free_set] = np.abs(g[free_set])
return np.max(g_kkt) | 216cf110d64d1fd8ec89c0359ebaa9b4e4dcc773 | 3,655,594 |
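# Usage sketch (not part of the original snippet): bound variables contribute g * sign,
# free variables contribute |g|.
g = np.array([1.0, -2.0, 3.0])
on_bound = np.array([1, 0, -1])
compute_kkt_optimality(g, on_bound)   # -> 2.0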
def replace_cipd_revision(file_path, old_revision, new_revision):
"""Replaces cipd revision strings in file.
Args:
file_path: Path to file.
old_revision: Old cipd revision to be replaced.
new_revision: New cipd revision to use as replacement.
Returns:
Number of replaced occurrences.
Raises:
IOError: If no occurrences were found.
"""
with open(file_path) as f:
contents = f.read()
num = contents.count(old_revision)
if not num:
raise IOError('Did not find old CIPD revision {} in {}'.format(
old_revision, file_path))
newcontents = contents.replace(old_revision, new_revision)
with open(file_path, 'w') as f:
f.write(newcontents)
return num | f429e74f0dd7180ab4bf90d662f8042b958b81f8 | 3,655,595 |
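# Usage sketch (not part of the original snippet): round trip through a temporary file.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("package: some/cipd/pkg\nrevision: abc123\n")
replace_cipd_revision(f.name, "abc123", "def456")   # returns 1 (one occurrence replaced)
os.unlink(f.name)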
import seaborn as sns


def spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None,
fft_step=None, fft_size=None):
"""
Plot the spectral derivatives of a song in a grey scale.
spec_der - The spectral derivatives of the song (computed with
`spectral_derivs`) or the song itself
contrast - The contrast of the plot
ax - The matplotlib axis where the plot must be drawn, if None, a new axis
is created
freq_range - The amount of frequency to plot, usefull only if `spec_der` is
a song. Given to `spectral_derivs`
    fft_step, fft_size - FFT parameters, passed to `spectral_derivs`
"""
if spec_der.ndim == 1:
spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size)
ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50,
vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys',
cbar=False)
ax.invert_yaxis()
return ax | 5b683d8c49e9bad2fd1fa029af6bc5660bc0e936 | 3,655,596 |
from operator import add
from operator import sub
def scale_center(pnt, fac, center):
"""scale point in relation to a center"""
return add(scale(sub(pnt, center), fac), center) | f69ca54e25d5eb8008b8f08c40500f236005e093 | 3,655,597 |
import feedparser


def gopherize_feed(feed_url, timestamp=False, plug=True):
"""Return a gophermap string for the feed at feed_url."""
return gopherize_feed_object(feedparser.parse(feed_url), timestamp, plug) | aaf4d35044c873e7d0f1a43c4d001ebe5e30714b | 3,655,598 |
import pendulum


def first_sunday_of_month(datetime: pendulum.DateTime) -> pendulum.DateTime:
"""Get the first Sunday of the month based on a given datetime.
:param datetime: the datetime.
:return: the first Sunday of the month.
"""
return datetime.start_of("month").first_of("month", day_of_week=7) | 88c517d1d38785c0d8f9c0f79f3d34199dfceb1e | 3,655,599 |