content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import sys
import os
import shutil
def CommitOffsite(
backup_name,
backup_suffix=None,
output_stream=sys.stdout,
preserve_ansi_escape_sequences=False,
):
"""\
Commits data previously generated by Offsite. This can be useful when
additional steps must be taken (for example, upload) before a Backup can
    be considered successful.
"""
with StreamDecorator.GenerateAnsiSequenceStream(
output_stream,
preserve_ansi_escape_sequences=preserve_ansi_escape_sequences,
) as output_stream:
with output_stream.DoneManager(
line_prefix="",
prefix="\nResults: ",
suffix="\n",
) as dm:
json_filename = _CreateJsonFilename(backup_name)
pending_json_filename = _CreatePendingJsonFilename(json_filename)
if not os.path.isfile(pending_json_filename):
dm.stream.write("ERROR: Pending data was not found.\n")
dm.result = -1
return dm.result
FileSystem.RemoveFile(json_filename)
shutil.move(pending_json_filename, json_filename)
if backup_suffix:
shutil.copy2(json_filename, "{}.{}".format(json_filename, backup_suffix))
dm.stream.write("The pending data has been committed.\n")
return dm.result | bfe523c2ecabb7c4bacd2fe3015929074dbae6f3 | 3,651,000 |
from itertools import combinations
def get_impropers(bonds):
"""
Iterate over bonds to get impropers.
    Choose every set of three bonds that share a common atom.
For each set of bonds you have 3 impropers where one of the noncommon atoms is out of plane.
Parameters
----------
bonds : list
List of atom ids that make up bonds.
Returns
-------
list
        List of atom id quadruplets that make up an improper.
"""
impropers, checked = [], []
for bond in bonds:
for atom in bond:
if atom not in checked:
bonded_list = []
for bond2 in bonds:
if atom in bond2:
bonded_list.append(bond2[1 - bond2.index(atom)])
if len(bonded_list) >= 3:
for triplet in combinations(bonded_list, 3):
for out_of_plane in triplet:
imp = tuple([out_of_plane, atom] + sorted([i for i in triplet if i != out_of_plane]))
impropers.append(imp)
checked.append(atom)
return sorted(impropers) | c5c2fe4684269407cd4387d86840bd982f1d3fa5 | 3,651,001 |
def get_ret_tev_return(*args):
"""get_ret_tev_return(int n) -> ea_t"""
return _idaapi.get_ret_tev_return(*args) | 94d476d12313b7df4da32cb45cfe644a0078debb | 3,651,002 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy import stats
def make_figure_6(prefix=None, rng=None, colors=None):
"""
Figures 6, Comparison of Performance
Ported from MATLAB Code
Nicholas O'Donoughue
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:param colors: colormap for plotting
:return: figure handle
"""
# Vary Time-Bandwidth Product
tbwp_vec_db = np.arange(start=10., stop=31., step=10., dtype=int)
tbwp_vec_lin = np.expand_dims(db_to_lin(tbwp_vec_db), axis=0).astype(int)
input_snr_vec_db = np.arange(start=-20, stop=10.1, step=0.1)
input_snr_vec_lin = np.expand_dims(db_to_lin(input_snr_vec_db), axis=1)
output_snr_vec_lin = tbwp_vec_lin*input_snr_vec_lin**2/(1+2*input_snr_vec_lin)
# output_snr_vec_db = lin_to_db(output_snr_vec_lin)
# Energy Detector Performance
prob_fa = 1e-6
threshold_ed = stats.chi2.ppf(q=1-prob_fa, df=2*tbwp_vec_lin)
prob_det_ed = stats.ncx2.sf(x=threshold_ed, df=2*tbwp_vec_lin, nc=2*tbwp_vec_lin*input_snr_vec_lin)
# Cross-Correlator Performance
threshold_xc = stats.chi2.ppf(q=1-prob_fa, df=2)
prob_det_xc = stats.ncx2.sf(x=threshold_xc/(1+2*input_snr_vec_lin), df=2, nc=2*output_snr_vec_lin)
# Monte Carlo Trials
input_snr_vec_coarse_db = input_snr_vec_db[::10]
input_snr_vec_coarse_lin = db_to_lin(input_snr_vec_coarse_db)
num_monte_carlo = int(1e4)
num_tbwp = int(tbwp_vec_lin.size)
num_snr = int(input_snr_vec_coarse_lin.size)
# Generate noise vectors
noise_pwr = 1 # Unit Variance
prob_det_ed_mc = np.zeros(shape=(num_snr, num_tbwp))
prob_det_xc_mc = np.zeros(shape=(num_snr, num_tbwp))
for idx_tbwp, tbwp in enumerate(np.ravel(tbwp_vec_lin)):
# Generate the noise vectors
noise1 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
noise2 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
# Generate a signal vector
signal = np.sqrt(1/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
phase_difference = np.exp(1j*rng.uniform(low=0, high=2*np.pi, size=(1, num_monte_carlo)))
for idx_snr, snr in enumerate(input_snr_vec_coarse_lin):
# Scale the signal power to match SNR
this_signal = signal * np.sqrt(snr)
y1 = this_signal+noise1
y2 = this_signal*phase_difference+noise2
det_result_ed = detector.squareLaw.det_test(z=y1, noise_var=noise_pwr/2, prob_fa=prob_fa)
prob_det_ed_mc[idx_snr, idx_tbwp] = np.sum(det_result_ed, axis=None)/num_monte_carlo
det_result_xc = detector.xcorr.det_test(y1=y1, y2=y2, noise_var=noise_pwr, num_samples=tbwp,
prob_fa=prob_fa)
prob_det_xc_mc[idx_snr, idx_tbwp] = np.sum(det_result_xc, axis=None)/num_monte_carlo
fig6 = plt.figure()
for idx, tbwp in enumerate(tbwp_vec_lin[0, :]):
if idx == 0:
ed_label = 'ED'
xc_label = 'XC'
ed_mc_label = 'ED (Monte Carlo)'
xc_mc_label = 'XC (Monte Carlo)'
else:
ed_label = None
xc_label = None
ed_mc_label = None
xc_mc_label = None
plt.plot(input_snr_vec_db, prob_det_ed[:, idx], color=colors(idx), linestyle='-', label=ed_label)
plt.plot(input_snr_vec_db, prob_det_xc[:, idx], color=colors(idx), linestyle='--', label=xc_label)
plt.scatter(input_snr_vec_coarse_db, prob_det_ed_mc[:, idx], color=colors(idx), marker='^', label=ed_mc_label)
plt.scatter(input_snr_vec_coarse_db, prob_det_xc_mc[:, idx], color=colors(idx), marker='x', label=xc_mc_label)
plt.legend(loc='lower right')
# Create ellipses
ax = plt.gca()
ell = Ellipse(xy=(2, .4), width=5, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(0))
ax.add_artist(ell)
    plt.annotate('TB=10', xy=(-.5, .4), xytext=(-16, .3), arrowprops=dict(arrowstyle='-', color=colors(0)))
ell = Ellipse(xy=(-3.5, .5), width=3, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(1))
ax.add_artist(ell)
    plt.annotate('TB=100', xy=(-5, .5), xytext=(-16, .5), arrowprops=dict(arrowstyle='-', color=colors(1)))
ell = Ellipse(xy=(-8.5, .6), width=3, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(2))
ax.add_artist(ell)
    plt.annotate('TB=1,000', xy=(-10, .6), xytext=(-16, .7), arrowprops=dict(arrowstyle='-', color=colors(2)))
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig6.svg')
plt.savefig(prefix + 'fig6.png')
return fig6 | 761d6ddd541dfbe42e5b57cd680306c71ae978d9 | 3,651,003 |
def slim_form(domain_pk=None, form=None):
"""
What is going on? We want only one domain showing up in the
    choices. We are replacing the query set with just one object. There
are two querysets. I'm not really sure what the first one does, but
I know the second one (the widget) removes the choices. The third
line removes the default u'--------' choice from the drop down.
"""
return form | 7b58674e307fbbd31f0546b70309c0c723d1021c | 3,651,004 |
def input(*args):
"""
Create a new input
    :param args: args that define a TensorType; can be either a TensorType or a shape and a DType
:return: the input expression
"""
tensor_type = _tensor_type_polymorhpic(*args)
return InputTensor(tensor_type, ExpressionDAG.num_inputs) | 47ab3a08f412b7dc9c679ae72bb44c76123a9057 | 3,651,005 |
def commong_substring(input_list):
"""Finds the common substring in a list of strings"""
def longest_substring_finder(string1, string2):
"""Finds the common substring between two strings"""
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if i + j < len1 and string1[i + j] == string2[j]:
match += string2[j]
else:
if len(match) > len(answer):
answer = match
match = ""
            # Keep a match that runs to the end of string2.
            if len(match) > len(answer):
                answer = match
        return answer
if len(input_list) == 2:
return longest_substring_finder(*input_list)
if len(input_list) > 2:
item0 = input_list[0]
for i in range(len(input_list) - 1):
item1 = input_list[i + 1]
item0 = commong_substring([item0, item1])
        return item0
if len(input_list) == 1:
return input_list[0] | 9e5e0878072a5416326ac1ed0d929adcb8511b37 | 3,651,006 |
import validators
def is_valid_url(url):
"""Checks if a URL is in proper format.
Args:
url (str): The URL that should be checked.
Returns:
bool: Result of the validity check in boolean form.
"""
valid = validators.url(url)
if valid:
return True
else:
return False | b55fd89267884dfc2507966825272a02e18d34f5 | 3,651,007 |
import re
import requests
def codepoint_to_url(codepoint, style):
"""
Given an emoji's codepoint (e.g. 'U+FE0E') and a non-apple emoji style,
    returns a url to the png image of the emoji in that style.
Only works for style = 'twemoji', 'noto', and 'blobmoji'.
"""
base = codepoint.replace('U+', '').lower()
if style == 'twemoji':
# See discussion in commit 8115b76 for more information about
# why the base needs to be patched like this.
patched = re.sub(r'0*([1-9a-f][0-9a-f]*)', lambda m: m.group(1),
base.replace(' ', '-').replace('fe0f-20e3', '20e3').replace('1f441-fe0f-200d-1f5e8-fe0f', '1f441-200d-1f5e8'))
response = requests.get('https://github.com/twitter/twemoji/raw/gh-pages/v/latest')
version = response.text if response.ok else None
if version:
return 'https://github.com/twitter/twemoji/raw/gh-pages/v/%s/72x72/%s.png' \
% (version, patched)
else:
return 'https://github.com/twitter/twemoji/raw/master/assets/72x72/%s.png' \
% patched
elif style == 'noto':
return 'https://github.com/googlefonts/noto-emoji/raw/master/png/128/emoji_u%s.png' \
% base.replace(' ', '_')
elif style == 'blobmoji':
return 'https://github.com/C1710/blobmoji/raw/master/png/128/emoji_u%s.png' \
% base.replace(' ', '_') | a5b47f5409d465132e3fb7141d81dbd617981ca8 | 3,651,008 |
def getRNCS(ChargeSA):
"""The calculation of relative negative charge surface area
-->RNCS
"""
charge=[]
for i in ChargeSA:
charge.append(float(i[1]))
temp=[]
for i in ChargeSA:
temp.append(i[2])
try:
RNCG = min(charge)/sum([i for i in charge if i < 0.0])
return temp[charge.index(min(charge))]/RNCG
except:
return 0.0 | f03011de85e1bcac01b2aba4afde61a3dd9f7866 | 3,651,009 |
def handle_auth_manager_auth_exception(error):
"""Return a custom message and 403 status code"""
response_header = {'X-REQUEST-ID': util.create_request_id()}
return {'message': error.message}, 403, response_header | 4b5212f4471a21cd54d012728705e83de5c7a86f | 3,651,010 |
def get_default_converter():
"""Intended only for advanced uses"""
return _TYPECATS_DEFAULT_CONVERTER | f88cdb13d53a228ff1d77a9065c1dabd0f83ed1d | 3,651,011 |
import json
def login(request):
"""
    :param request: the HTTP request
:return: JSON data
"""
response = {}
if request.method == 'GET':
username = request.GET.get('username')
password = request.GET.get('password')
try:
usr = models.User.objects.filter(username=username, password=password)
if usr:
response['status'] = 'success'
response['error_msg'] = ''
response['data'] = json.loads(serializers.serialize('json', usr))
else:
response['status'] = 'failure'
                response['error_msg'] = '用户名或密码错误,请重试'  # "Incorrect username or password, please try again"
response['data'] = None
except Exception as e:
response['status'] = 'error'
response['error_msg'] = str(e)
response['data'] = None
return JsonResponse(response) | 2d9b6791a2160ec63929d5a37e6d8336cca7709a | 3,651,012 |
def average_win_rate(strategy, baseline=always_roll(4)):
"""Return the average win rate of STRATEGY against BASELINE. Averages the
winrate when starting the game as player 0 and as player 1.
"""
win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)
win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)
return (win_rate_as_player_0 + win_rate_as_player_1) / 2 | 2e6b78127543456b7e931c837cf1a9468c013c33 | 3,651,013 |
def decode(chrom):
"""
Returns the communities of a locus-based adjacency codification
    in a vector of ints where each position is a node id and the value
    at that position is the id of the community it belongs to. Two positions
    holding the same number means those two nodes belong to the same community.
"""
try:
size = len(chrom)
last_c = 0
communities = [float("inf")] * size
pending = set(range(size))
while len(pending) != 0:
index = int(pending.pop())
neighbour = int(chrom[index])
if neighbour != -1:
communities[index] = min(last_c, communities[index], communities[neighbour])
while neighbour in pending:
pending.remove(neighbour)
communities[neighbour] = min(last_c, communities[neighbour])
neighbour = int(chrom[neighbour])
last_c += 1
return communities
except Exception as e:
raise e | 998a58e0d4efad2c079a9d023530aca37d0e226e | 3,651,014 |
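A small worked example may help clarify the locus-based codification above: position i points at chrom[i], and linked positions end up sharing a community id (the exact label values depend on the set pop order; only the grouping is meaningful):

```python
# Hypothetical chromosome: node 0 <-> node 1, and nodes 2, 3, 4 linked together.
chrom = [1, 0, 3, 2, 2]
print(decode(chrom))
# Nodes 0 and 1 share one label, nodes 2, 3 and 4 share another, e.g. [0, 0, 1, 1, 1].
```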
import math
def bin_search(query, data):
""" Query is a coordinate interval. Approximate binary search for the query in sorted data,
which is a list of coordinates. Finishes when the closest overlapping value of query and
data is found and returns the index in data. """
i = int(math.floor(len(data)/2)) # binary search prep
lower, upper = 0, len(data)
if not upper:
return -1
tried = set()
rightfound = '' # null value in place of 0, which is a valid value for rightfound
while not (data[i][0] <= query[0] and data[i][1] >= query[0]): # query left coordinate not found in data yet
if data[i][0] <= query[1] and data[i][1] >= query[1]: # query right found, will keep looking for left
rightfound = i
if data[i][1] < query[0]: # i is too low of an index
lower = i
i = int(math.floor((lower + upper)/2.))
else: # i is too high of an index
upper = i
i = int(math.floor((lower + upper)/2.))
if i in tried or i == upper:
if data[i][0] >= query[0] and data[i][1] <= query[1]: # data interval sandwiched inside query
break
elif i + 1 < len(data) and data[i+1][0] > query[0] and data[i+1][1] < query[1]: # data can be incremented
i = i + 1
else:
i = rightfound if rightfound != '' else -1
break
tried.add(i)
return i | bb93034bc5c7e432c3fc55d4485949688e62b84a | 3,651,015 |
def get_rating(business_id):
""" GET Business rating"""
rating = list(
db.ratings.aggregate(
[{"$group": {"_id": "$business", "pop": {"$avg": "$rating"}}}]
)
)
    if not rating:  # the aggregation returns an empty list when no ratings exist
return (
jsonify(
{
"success": False,
"message": "Rating for business {} not found.".format(business_id),
}
),
404,
)
print(rating)
return jsonify({"success": True, "rating": clean_dict_helper(rating)}) | 3a1cbf3e815c879b4ddaa5185477f141b261a859 | 3,651,016 |
import numpy as np
def fwhm(x,y):
    """Calculate the FWHM for a set of x and y values.
The FWHM is returned in the same units as those of x."""
maxVal = np.max(y)
maxVal50 = 0.5*maxVal
#this is to detect if there are multiple values
biggerCondition = [a > maxVal50 for a in y]
changePoints = []
xPoints = []
for k in range(len(biggerCondition)-1):
if biggerCondition[k+1] != biggerCondition[k]:
changePoints.append(k)
assert len(changePoints) == 2, "More than two crossings of the threshold found."
for k in changePoints:
# do a polyfit
# with the points before and after the point where the change occurs.
# note that here we are fitting the x values as a function of the y values.
# then we can use the polynom to compute the value of x at the threshold, i.e. at maxVal50.
yPolyFit = x[k-1:k+2]
xPolyFit = y[k-1:k+2]
z = np.polyfit(xPolyFit,yPolyFit,2)
p = np.poly1d(z)
xThis = p(maxVal50)
xPoints.append(xThis)
if len(xPoints) == 2:
linewidth = xPoints[1] - xPoints[0]
else:
linewidth = None
print(sorted(xPoints))
return linewidth | 2dc18d15d2940520acde39c5914413d89e9fbc71 | 3,651,017 |
from glob import glob
from Bio import SeqIO
def parse_names(input_folder):
    """
    Collect record names from every sequence file in a folder.
    :param input_folder: directory containing the input sequence files
    :return: the sorted list of files and the sorted list of unique record names (the part of each description before the first '_')
"""
name_set = set()
if args.suffix:
files = sorted(glob(f'{input_folder}/*{args.suffix}'))
else:
files = sorted(glob(f'{input_folder}/*'))
for file in files:
with open(file) as f:
for record in SeqIO.parse(f, args.in_format):
fname = record.description
name = fname.split('_')[0]
name_set.add(name)
return files, sorted(list(name_set)) | 10b72d9822d6c8057f9bc45936c8d1bfb1a029b6 | 3,651,018 |
import numpy as np
from typing import Iterable
from typing import Tuple
from typing import Mapping
from typing import Union
def build_charencoder(corpus: Iterable[str], wordlen: int=None) \
-> Tuple[int, Mapping[str, int], TextEncoder]:
"""
Create a char-level encoder: a Callable, mapping strings into integer arrays.
Encoders dispatch on input type: if you pass a single string, you will get
a 1D array, if you pass an Iterable of strings, you will get a 2D array
where row i encodes the i-th string in the Iterable.
:param corpus: an Iterable of strings to extract characters from. The
encoder will map any non-ASCII character into the OOV code.
:param wordlen: when `wordlen` is None and an encoder receives an Iterable of
strings, the second dimension in the output array will be as long as the
longest string, otherwise it will be `wordlen` long. In the latter case
words exceeding `wordlen` will be trimmed. In both cases empty-spaces are
filled with zeros.
:return: the OOV code, a character mapping representing non-OOV character
encodings, an encoder
"""
if wordlen and wordlen < 1:
raise ValueError('`wordlen` must be positive')
try:
charmap = {char: i + 1 for i, char in enumerate(asciicharset(corpus))}
except TypeError:
raise ValueError('`corpus` can be either a string or an Iterable of '
'strings')
if not charmap:
raise ValueError('the `corpus` is empty')
oov = len(charmap) + 1
def encode_string(string: str) -> np.ndarray:
if not string:
raise ValueError("can't encode empty strings")
return np.fromiter((charmap.get(char, oov) for char in string), np.int32,
len(string))
def charencoder(target: Union[str, Iterable[str]]):
if isinstance(target, str):
return encode_string(target)
encoded_strings = list(map(encode_string, target))
if not encoded_strings:
raise ValueError('there are no `target`')
return preprocessing.stack(
encoded_strings, [wordlen or -1], np.int32, 0, True)[0]
return oov, charmap, charencoder | 207a5f499930f2c408ac88199ac45c60b3ed9d97 | 3,651,019 |
import struct
def Decodingfunc(Codebyte):
"""This is the version 'A' of decoding function,
that decodes data coded by 'A' coding function"""
Decodedint=struct.unpack('b',Codebyte)[0]
N=0 #number of repetitions
L=0 # length of single/multiple sequence
if Decodedint >= 0: #single
N = 1
L = Decodedint+1
else: #multiple
L = -Decodedint//16+1
N = -Decodedint-(L-1)*16+1
#print("N =",N," L =",L)
return (N,L) | 450a3e6057106e9567952b33271935392702aea9 | 3,651,020 |
def _metric_notification_text(metric: MetricNotificationData) -> str:
"""Return the notification text for the metric."""
new_value = "?" if metric.new_metric_value is None else metric.new_metric_value
old_value = "?" if metric.old_metric_value is None else metric.old_metric_value
unit = metric.metric_unit if metric.metric_unit.startswith("%") else f" {metric.metric_unit}"
old_value_text = " (unchanged)" if new_value == old_value else f", was {old_value}{unit}"
return (
f" * *{metric.metric_name}* status is {metric.new_metric_status}, was {metric.old_metric_status}. "
f"Value is {new_value}{unit}{old_value_text}.\n"
) | 855ec000b3e37d9f54e4a12d7df4f973b15b706f | 3,651,021 |
from typing import Optional
from typing import Union
from typing import List
from typing import Dict
from typing import Text
def train_dist(
domain: Text,
config: Text,
training_files: Optional[Union[Text, List[Text]]],
output: Text = rasa.shared.constants.DEFAULT_MODELS_PATH,
dry_run: bool = False,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
core_additional_arguments: Optional[Dict] = None,
nlu_additional_arguments: Optional[Dict] = None,
model_to_finetune: Optional[Text] = None,
finetuning_epoch_fraction: float = 1.0,
) -> TrainingResult:
"""Trains a Rasa model (Core and NLU).
Args:
domain: Path to the domain file.
config: Path to the config file.
training_files: List of paths to training data files.
output: Output directory for the trained model.
dry_run: If `True` then no training will be done, and the information about
whether the training needs to be done will be printed.
force_training: If `True` retrain model even if data has not changed.
fixed_model_name: Name of model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
core_additional_arguments: Additional training parameters for core training.
nlu_additional_arguments: Additional training parameters forwarded to training
method of each NLU component.
model_to_finetune: Optional path to a model which should be finetuned or
a directory in case the latest trained model should be used.
finetuning_epoch_fraction: The fraction currently specified training epochs
in the model configuration which should be used for finetuning.
Returns:
An instance of `TrainingResult`.
"""
file_importer = TrainingDataImporter.load_from_config(
config, domain, training_files
)
stories = file_importer.get_stories()
nlu_data = file_importer.get_nlu_data()
training_type = TrainingType.BOTH
if nlu_data.has_e2e_examples():
rasa.shared.utils.common.mark_as_experimental_feature("end-to-end training")
training_type = TrainingType.END_TO_END
if stories.is_empty() and nlu_data.contains_no_pure_nlu_data():
rasa.shared.utils.cli.print_error(
"No training data given. Please provide stories and NLU data in "
"order to train a Rasa model using the '--data' argument."
)
return TrainingResult(code=1)
domain = file_importer.get_domain()
if domain.is_empty():
rasa.shared.utils.cli.print_warning(
"Core training was skipped because no valid domain file was found. "
"Only an NLU-model was created. Please specify a valid domain using "
"the '--domain' argument or check if the provided domain file exists."
)
training_type = TrainingType.NLU
elif stories.is_empty():
rasa.shared.utils.cli.print_warning(
"No stories present. Just a Rasa NLU model will be trained."
)
training_type = TrainingType.NLU
# We will train nlu if there are any nlu example, including from e2e stories.
elif nlu_data.contains_no_pure_nlu_data() and not nlu_data.has_e2e_examples():
rasa.shared.utils.cli.print_warning(
"No NLU data present. Just a Rasa Core model will be trained."
)
training_type = TrainingType.CORE
with telemetry.track_model_training(
file_importer, model_type="rasa",
):
return _train_graph_dist(
file_importer,
training_type=training_type,
output_path=output,
fixed_model_name=fixed_model_name,
model_to_finetune=model_to_finetune,
force_full_training=force_training,
persist_nlu_training_data=persist_nlu_training_data,
finetuning_epoch_fraction=finetuning_epoch_fraction,
dry_run=dry_run,
**(core_additional_arguments or {}),
**(nlu_additional_arguments or {}),
) | 1d1f55dca4a6274713cdd17a7ff5efcc90b46d14 | 3,651,022 |
import logging
import nibabel
import numpy as np
def convert_image_to_nifti(path_image, path_out_dir=None):
    """ Convert a normal image to a NIfTI image.
:param str path_image: input image
:param str path_out_dir: path to output folder
:return str: resulted image
>>> path_img = os.path.join(update_path('data-images'), 'images',
... 'artificial_moving-affine.jpg')
>>> path_img2 = convert_image_to_nifti(path_img, '.')
>>> path_img2 # doctest: +ELLIPSIS
'...artificial_moving-affine.nii'
>>> os.path.isfile(path_img2)
True
>>> path_img3 = convert_image_from_nifti(path_img2)
>>> os.path.isfile(path_img3)
True
>>> list(map(os.remove, [path_img2, path_img3])) # doctest: +ELLIPSIS
[...]
"""
path_image = update_path(path_image)
path_img_out = _gene_out_path(path_image, '.nii', path_out_dir)
logging.debug('Convert image to Nifti format "%s" -> "%s"', path_image, path_img_out)
# img = Image.open(path_file).convert('LA')
img = load_image(path_image)
nim = nibabel.Nifti1Pair(img, np.eye(4))
del img
nibabel.save(nim, path_img_out)
return path_img_out | 96783ad091e9b0949aa74a729b75337b9c96a0d0 | 3,651,023 |
def wav2vec2_base() -> Wav2Vec2Model:
"""Build wav2vec2 model with "base" configuration
    This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for pretraining.
Returns:
        Wav2Vec2Model: The constructed model.
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=None,
) | fb288116f5ef57b314ecfde4a85b1a9bb5d437ce | 3,651,024 |
from unittest.mock import patch
def dont_handle_lock_expired_mock(app):
"""Takes in a raiden app and returns a mock context where lock_expired is not processed
"""
def do_nothing(raiden, message): # pylint: disable=unused-argument
return []
return patch.object(
app.raiden.message_handler, "handle_message_lockexpired", side_effect=do_nothing
) | 2a893e7e755010104071b2b1a93b60a0417e5457 | 3,651,025 |
import sys
import _warnings
def _find_spec(name, path, target=None):
"""Find a module's spec."""
meta_path = sys.meta_path
if meta_path is None:
# PyImport_Cleanup() is running or has been called.
raise ImportError("sys.meta_path is None, Python is likely "
"shutting down")
if not meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
# target will usually indicate a reload there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None | 17fe8116db7f2fedd92ab98755d4e1b4971fac96 | 3,651,026 |
def system(_printer, ast):
"""Prints the instance system initialization."""
process_names_str = ' < '.join(map(lambda proc_block: ', '.join(proc_block), ast["processNames"]))
return f'system {process_names_str};' | f16c6d5ebe1a029c07efd1f34d3079dd02eb4ac0 | 3,651,027 |
from random import random
def genmove(proc, colour, pluck_random=True):
""" Send either a `genmove` command to the client, or generate a random
move until it is accepted by the client """
if pluck_random and random() < 0.05:
for _count in range(100):
proc.stdin.write('1000 play %s %s\n' % (colour, random_vertex(),))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=1000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
elif line.startswith('?1000'):
break
return 'pass'
else:
proc.stdin.write('2000 genmove %s\n' % (colour,))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=2000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
return None | 589a054be52c40507d8aba5f10a3d67489ec301b | 3,651,028 |
from qgis.core import QgsVectorLayer, QgsWkbTypes
def geojson_to_meta_str(txt):
""" txt is assumed to be small
"""
vlayer = QgsVectorLayer(txt, "tmp", "ogr")
crs_str = vlayer.sourceCrs().toWkt()
wkb_type = vlayer.wkbType()
geom_str = QgsWkbTypes.displayString(wkb_type)
feat_cnt = vlayer.featureCount()
return geom_str, crs_str, feat_cnt | 33b0a2055ec70c2142977469384a20b99d26cee8 | 3,651,029 |
def tdf_UppestID(*args):
"""
* Returns ID 'ffffffff-ffff-ffff-ffff-ffffffffffff'.
:rtype: Standard_GUID
"""
return _TDF.tdf_UppestID(*args) | 1d9d5c528a2f202d49c104b7a56dd7a75b9bc795 | 3,651,030 |
def blend_multiply(cb: float, cs: float) -> float:
"""Blend mode 'multiply'."""
return cb * cs | d53c3a49585cf0c12bf05c233fc6a9dd30ad25b9 | 3,651,031 |
import numpy as np
def print_data_distribution(y_classes, class_names):
"""
:param y_classes: class of each instance, for example, if there are 3 classes, and y[i] is [1,0,0], then instance[i] belongs to class[0]
:param class_names: name of each class
    :return: the list of class names and the list of per-class counts
"""
count = np.zeros(len(class_names))
pro = []
num = []
for y in y_classes:
class_index = np.argmax(y)
count[class_index] = count[class_index] + 1
for i, class_name in enumerate(class_names):
print(class_name, count[i])
pro.append(class_name)
num.append(count[i])
return pro, num | 289ada7cab00153f894e81dd32980b8d224d637c | 3,651,032 |
import collections.abc
import numpy as np
def reorder_conj_pols(pols):
"""
Reorders a list of pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after conjugating
the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Args:
pols: Polarization array (strings or ints)
Returns:
conj_order: Indices to reorder polarization axis
"""
    if not isinstance(pols, collections.abc.Iterable):
raise ValueError('reorder_conj_pols must be given an array of polarizations.')
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError('Not all conjugate pols exist in the polarization array provided.')
return conj_order | 98730f8434eff02c9a63506e01fbcd478e23e76e | 3,651,033 |
def get_machine_from_uuid(uuid):
"""Helper function that returns a Machine instance of this uuid."""
machine = Machine()
machine.get_from_uuid(uuid)
return machine | 6f78afd9547af5c83abf49a1ac56209ee0e6b506 | 3,651,034 |
def convert_numbers(text):
"""Convert numbers to number words"""
tokens = []
for token in text.split(" "):
try:
word = w2n.num_to_word(token)
tokens.append(word)
except:
tokens.append(token)
return " ".join(tokens) | 8d6eb622076a0404824db2dbeaaba704f3bf6e79 | 3,651,035 |
def init_emulator(rom: bytes):
""" For use in interactive mode """
emulator = NitroEmulator()
emulator.load_nds_rom(rom, True)
return emulator | 9ecaa2a876b8e5bd93deece3ccc62b41ef9c6f3f | 3,651,036 |
from typing import Dict
from typing import Union
import torch
from torch import nn
def sub_module_name_of_named_params(named_params: kParamDictType, module_name_sub_dict: Dict[str, str]) \
-> Union[Dict[str, nn.Parameter], Dict[str, torch.Tensor]]:
"""Sub named_parameters key's module name part with module_name_sub_dict.
Args:
named_params: Key-value pair of param name and param value.
module_name_sub_dict: Module names' sub dict.
Returns:
named parameters whose module name part of it's param name is subbed by module_name_sub_dict.
"""
sub_named_params = dict()
for module_param_name, value in named_params.items():
param_name, module_name = map(lambda inverse_name: inverse_name[::-1],
module_param_name[::-1].split('.', maxsplit=1))
if module_name not in module_name_sub_dict:
sub_named_params[module_param_name] = value
else:
sub_named_params[module_name_sub_dict[module_name] + '.' + param_name] = value
return sub_named_params | 8bbcdb865f2b0c452c773bc18767128561e806c7 | 3,651,037 |
import inspect
def add_mongodb_document(
m_client_db=get_mongodb_database(),
collection=None,
index_name=None,
doc_type=None,
doc_uuid=None,
doc_body=None
):
"""
    Function to add a MongoDB document by providing index_name,
document type, document contents as doc and document id.
"""
status = { 'status_code' : 200 }
log_.debug( "function : %s", inspect.stack()[0][3] )
log_.debug( "locals() : \n%s", pformat(locals()))
db = m_client_db[ collection ]
### TO DO
try :
res = db.insert_one(
doc_body,
)
res_add = {
'item_id' : str(res.inserted_id),
'operation' : "item added"
}
except :
res = {}
res_add = {
'item_id' : None,
'operation' : 'not added...'
}
status = {
'status_code' : 500,
'error' : "",
'info' : ""
}
# log_.debug( "res : \n%s", pformat(res.__dict__))
log_.debug( "res_add : \n%s", pformat(res_add))
print()
return res_add, status | 08917c72a183d30d30d7e62ff4b5a827cf11de17 | 3,651,038 |
def my_func_1(x, y):
"""
    Returns x raised to the power y.
    Named parameters:
    x -- the base number
    y -- the exponent
(number, number) -> number
>>> my_func_1(2, 2)
4
"""
return x ** y | 9572566f1660a087056118bf974bf1913348dfa4 | 3,651,039 |
import webtest
def indexer_testapp(es_app):
""" Indexer testapp, meant for manually triggering indexing runs by posting to /index.
Always uses the ES app (obviously, but not so obvious previously) """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'INDEXER',
}
return webtest.TestApp(es_app, environ) | 59343963307c39e43034664febb0ebf00f6ab1bd | 3,651,040 |
import tensorflow as tf
import tensorflow_probability as tfp
def BNN_like(NN,cls=tfp.layers.DenseReparameterization,copy_weight=False,**kwargs):
"""
Create Bayesian Neural Network like input Neural Network shape
Parameters
----------
NN : tf.keras.Model
Neural Network for imitating shape
cls : tfp.layers
Bayes layers class
copy_weight : bool, optional
Copy weight from NN when `True`. The default is `False`
Returns
-------
model : tf.keras.Model
Bayes Neural Network
"""
inputs = tf.keras.Input(shape=(tf.shape(NN.layers[0].kernel)[0],))
x = inputs
for i, L in enumerate(NN.layers):
layer_kwargs = { **kwargs }
if copy_weight:
            layer_kwargs["kernel_prior_fn"] = multivariate_normal_fn(L.kernel)
            layer_kwargs["bias_prior_fn"] = multivariate_normal_fn(L.bias)
x = cls(L.units,activation=L.activation,**layer_kwargs)(x)
return tf.keras.Model(inputs=inputs,outputs=x) | 9039f70701fd832843fd160cd71d5d46f7b17b56 | 3,651,041 |
def matrix_mult(a, b):
"""
Function that multiplies two matrices a and b
Parameters
----------
a,b : matrices
Returns
-------
new_array : matrix
The matrix product of the inputs
"""
new_array = []
for i in range(len(a)):
new_array.append([0 for i in range(len(b[0]))])
for j in range(len(b[0])):
for k in range(len(a[0])):
new_array[i][j] += a[i][k] * b[k][j]
return new_array | 5e0f27f29b6977ea38987fa243f08bb1748d4567 | 3,651,042 |
from typing import Tuple
from typing import List
from typing import Type
from typing import _Final
def build_type(tp) -> Tuple[str, List[Type]]:
"""
Build typescript type from python type.
"""
tokens = tokenize_python_type(tp)
dependencies = [
token
for token in tokens
if token not in TYPE_MAPPING_WITH_GENERIC_FALLBACK
and not type(token) in TRIVIAL_TYPE_MAPPING
and not isinstance(token, _Final)
]
return _build_type(tokens), dependencies | 475362488b7fe07db035ce70ddb3ac40580412dd | 3,651,043 |
def laplacian_radial_kernel(distance, bandwidth=1.0):
"""Laplacian radial kernel.
Parameters
----------
distance : array-like
Array of non-negative real values.
bandwidth : float, optional (default=1.0)
Positive scale parameter of the kernel.
Returns
-------
weight : array-like
Array of non-negative real values of the same shape than
parameter 'distance'.
    References
    ----------
http://crsouza.com/2010/03/17/
kernel-functions-for-machine-learning-applications/
https://data-flair.training/blogs/svm-kernel-functions/
"""
distance = _check_distance(distance)
bandwidth = _check_bandwidth(bandwidth)
scaled_distance = distance / bandwidth
weight = gs.exp(- scaled_distance)
return weight | fd5f777b0d21e3a7673a6589a76dd50f48384029 | 3,651,044 |
def build_eslog_config_param(
group_id,
task_name,
rt_id,
tasks,
topic,
table_name,
hosts,
http_port,
transport,
es_cluster_name,
es_version,
enable_auth,
user,
password,
):
"""
    Build the ES connector parameters.
    :param group_id: cluster (consumer group) name
    :param task_name: task name
    :param rt_id: rt_id
    :param tasks: number of tasks
    :param topic: source topic
    :param table_name: table name
    :param hosts: ES hosts
    :param http_port: ES HTTP port
    :param transport: ES transport port
    :param es_cluster_name: ES cluster name
    :param es_version: ES cluster version
    :param enable_auth: whether authentication is enabled
    :param user: username
    :param password: password (already encrypted)
    :return: the parameter dict
"""
return {
"group.id": group_id,
"rt.id": rt_id,
"topics": topic,
"type.name": table_name,
"tasks.max": "%s" % tasks,
"es.index.prefix": table_name.lower(),
"es.cluster.name": es_cluster_name,
"es.cluster.version": es_version,
"es.hosts": hosts,
"es.transport.port": transport,
"es.host": hosts,
"es.http.port": http_port,
"connector.class": "com.tencent.bk.base.datahub.databus.connect.sink.es.EsSinkConnector",
"flush.timeout.ms": "10000",
"batch.size": "10000",
"max.in.flight.requests": "5",
"retry.backoff.ms": "5000",
"max.retry": "5",
"es.cluster.enable.auth": enable_auth,
"es.cluster.enable.PlaintextPwd": False, # 当前都是加密后的密码
"es.cluster.username": user,
"es.cluster.password": password,
} | 826b8d97ef14792845b4ced98ab5dcb3f36e57f3 | 3,651,045 |
import pandas as pd
def disclosure(input_df, cur_period):
"""
Reading in a csv, converting to a data frame and converting some cols to int.
:param input_df: The csv file that is converted into a data frame.
:param cur_period: The current period for the results process.
:return: None.
"""
input_df = pd.read_csv(input_df, dtype={"Q601_asphalting_sand": int,
'Q602_building_soft_sand': int,
'Q603_concreting_sand': int,
'Q604_bituminous_gravel': int,
'Q605_concreting_gravel': int,
'Q606_other_gravel': int,
'Q607_constructional_fill': int,
'Q608_total': int,
'enterprise_ref': int, 'period': int,
'region': int})
input_df["disclosive"] = None
input_df["publish"] = None
input_df["reason"] = None
def run_disclosure(row):
if row['Q608_total'] == 0:
row['disclosive'] = 'N'
row['publish'] = 'Publish'
row['reason'] = ' Total is zero'
else:
row['disclosive'] = 'Y'
row['publish'] = 'N/A'
return row
disaggregated_data = input_df[input_df.period == cur_period]
region_agg = disaggregated_data.groupby('region')
region_agg = region_agg.agg({'Q608_total': 'sum', 'Q607_constructional_fill': 'sum',
'Q606_other_gravel': 'sum', 'Q605_concreting_gravel': 'sum',
'Q604_bituminous_gravel': 'sum', 'Q603_concreting_sand': 'sum',
'Q602_building_soft_sand': 'sum', 'Q601_asphalting_sand': 'sum',
'enterprise_ref': 'nunique'})
region_agg = region_agg.apply(run_disclosure, axis=1)
# regionlorm = disaggregated_data.groupby(['region'])
region_agg_lorm = disaggregated_data.groupby(['region', 'land_or_marine'])
return region_agg_lorm | 65702fa309884206f284b35c48e2e8c8a34aef2b | 3,651,046 |
import os
def get_all_file_paths(directory):
"""
Gets all the files in the specified input directory
"""
file_paths = []
for root, _, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths | 7055a3f3f3be5f6e0074cef55689c6234d38deb6 | 3,651,047 |
def kitchen_sink():
"""Combines all of the test data."""
return word_frequencies.load(_KITCHEN_SINK_DATA) | 4e0b0d38465fb02cd4f8aeb5e54c2f6bcbdf2cda | 3,651,048 |
import torch
def sim_matrix(a, b, eps=1e-8):
"""
added eps for numerical stability
"""
a = normalize_embeddings(a, eps)
b = normalize_embeddings(b, eps)
sim_mt = torch.mm(a, b.transpose(0, 1))
return sim_mt | d0caa5ce6e9f86b861910221321b80752b4f24e4 | 3,651,049 |
import xml.etree.ElementTree as ET
def read_addon_xml(path):
"""Parse the addon.xml and return an info dictionary"""
info = dict(
path='./', # '/storage/.kodi/addons/plugin.video.vrt.nu',
profile='special://userdata', # 'special://profile/addon_data/plugin.video.vrt.nu/',
type='xbmc.python.pluginsource',
)
tree = ET.parse(path)
root = tree.getroot()
info.update(root.attrib) # Add 'id', 'name' and 'version'
info['author'] = info.pop('provider-name')
for child in root:
if child.attrib.get('point') != 'xbmc.addon.metadata':
continue
for grandchild in child:
# Handle assets differently
if grandchild.tag == 'assets':
for asset in grandchild:
info[asset.tag] = asset.text
continue
# Not in English ? Drop it
if grandchild.attrib.get('lang', 'en_GB') != 'en_GB':
continue
# Add metadata
info[grandchild.tag] = grandchild.text
return {info['name']: info} | 6ead602b97c12bfd78ddc7194102a84793aa631b | 3,651,050 |
import requests
def get_submission_list(start_timestamp, end_timestamp, args=None):
"""
    Scrapes a subreddit for submissions between two given dates. Due to limitations
    of the underlying service, it may not return all the possible submissions, so
    it will be necessary to call this method again. The method requests the results
    in descending order, so in subsequent calls, you should only update end_timestamp.
:param start_timestamp: request results after this date/time.
:param end_timestamp: request results before this date/time.
:param args: the args to pass to the endpoint
:return: the JSON object returned by the service.
"""
# Generic parameters: for each submission we want its ID and timestamp,
# 500 is the maximum limit, sorted temporally by the most recent
params = "fields=id,created_utc,subreddit&limit=500&sort=desc&sort_type=created_utc"
if args:
for key, value in args.items():
params += "&{0}={1}".format(key, value)
url = "{0}?before={1}&after={2}&{3}".format(
PUSHSHIFT_ENDPOINT, end_timestamp, start_timestamp, params
)
resp = requests.get(url)
return resp.json() | 7fa053c27787136420a9004721c1954318deeedb | 3,651,051 |
def loadDataSet():
"""
load data from data set
Args:
Returns:
dataSet: train input of x
labelSet: train input of y
"""
# initialize x-trainInput,y-trainInput
dataSet = []
labelSet = []
# open file reader
fr = open('testSet.txt')
for line in fr.readlines():
        # strip() -- remove whitespace from both ends
        # split() -- split on whitespace
lineArr = line.strip().split()
# padding data in list
# x0 = 1.0 , x1 = column1 , x2 = column2
dataSet.append([1.0, float(lineArr[0]), float(lineArr[1])])
# label = column3
labelSet.append(float(lineArr[2]))
return dataSet,labelSet | 38f42a8a7c6b12e3d46d757d98565222e931149f | 3,651,052 |
def xds_read_xparm_new_style(xparm_file):
"""Parse the XPARM file to a dictionary."""
    data = list(map(float, " ".join(open(xparm_file, "r").readlines()[1:]).split()))
starting_frame = int(data[0])
phi_start, phi_width = data[1:3]
axis = data[3:6]
wavelength = data[6]
beam = data[7:10]
spacegroup = int(data[10])
cell = data[11:17]
a, b, c = data[17:20], data[20:23], data[23:26]
assert int(data[26]) == 1
nx, ny = map(int, data[27:29])
px, py = data[29:31]
ox, oy = data[31:33]
distance = data[33]
x, y = data[34:37], data[37:40]
normal = data[40:43]
results = {
"starting_frame": starting_frame,
"phi_start": phi_start,
"phi_width": phi_width,
"axis": axis,
"wavelength": wavelength,
"beam": beam,
"nx": nx,
"ny": ny,
"px": px,
"py": py,
"distance": distance,
"ox": ox,
"oy": oy,
"x": x,
"y": y,
"normal": normal,
"spacegroup": spacegroup,
"cell": cell,
"a": a,
"b": b,
"c": c,
}
return results | ba5a851c68c54aa0c9f82df1dc2334f427c8cea8 | 3,651,053 |
def clear_bit(val, offs):
"""Clear bit at offset 'offs' in value."""
return val & ~(1 << offs) | e50e5f8ccc3fe08d9b19248e290c2117b78379ee | 3,651,054 |
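For example, clearing bit 1 of 0b1111 leaves 0b1101:

```python
print(bin(clear_bit(0b1111, 1)))  # -> 0b1101
```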
from collections import namedtuple
def get_org_details(orgs):
"""Get node and site details, store in Org object"""
org_details = []
for org in orgs:
org_id = org['id']
org_name = org['name']
org_longname = org['longname']
Org = namedtuple('Org', ['org_id', 'org_name', 'org_longname'])
org_details.extend([Org(org_id, org_name, org_longname)])
return org_details | 94bec33c2fbee35210ca61f6b8d3694d198c80ee | 3,651,055 |
import logging
def flush_after(handler, delay):
"""Add 'handler' to the queue so that it is flushed after 'delay' seconds by the flush thread.
Return the scheduled event which may be used for later cancellation (see cancel()).
"""
if not isinstance(handler, logging.Handler):
raise TypeError("handler must be a logging.Handler instance")
return _FLUSH_THREAD.submit(handler.flush, delay) | a8cb8197643dbd092f709bed0726d076997e4715 | 3,651,056 |
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip() | 6a0c0d4aa74b4e84de69de023e2721edd95c36bd | 3,651,057 |
import math
def logGamma(x):
"""The natural logarithm of the gamma function.
Based on public domain NETLIB (Fortran) code by W. J. Cody and L. Stoltz<BR>
Applied Mathematics Division<BR>
Argonne National Laboratory<BR>
Argonne, IL 60439<BR>
<P>
References:
<OL>
<LI>W. J. Cody and K. E. Hillstrom, 'Chebyshev Approximations for the Natural Logarithm of the Gamma Function,' Math. Comp. 21, 1967, pp. 198-203.
<LI>K. E. Hillstrom, ANL/AMD Program ANLC366S, DGAMMA/DLGAMA, May, 1969.
<LI>Hart, Et. Al., Computer Approximations, Wiley and sons, New York, 1968.
</OL></P><P>
From the original documentation:
</P><P>
This routine calculates the LOG(GAMMA) function for a positive real argument X.
Computation is based on an algorithm outlined in references 1 and 2.
The program uses rational functions that theoretically approximate LOG(GAMMA)
to at least 18 significant decimal digits. The approximation for X > 12 is from reference 3,
while approximations for X < 12.0 are similar to those in reference 1, but are unpublished.
The accuracy achieved depends on the arithmetic system, the compiler, the intrinsic functions,
and proper selection of the machine-dependent constants.
</P><P>
Error returns:<BR>
The program returns the value XINF for X .LE. 0.0 or when overflow would occur.
The computation is believed to be free of underflow and overflow."""
y = x
if y < 0.0 or y > LOG_GAMMA_X_MAX_VALUE:
# Bad arguments
return float("inf")
if y <= EPS:
return -math.log(y)
if y <= 1.5:
if (y < pnt68):
corr = -math.log(y)
xm1 = y
else:
corr = 0.0;
xm1 = y - 1.0;
if y <= 0.5 or y >= pnt68:
xden = 1.0;
xnum = 0.0;
            for i in range(8):
xnum = xnum * xm1 + lg_p1[i];
xden = xden * xm1 + lg_q1[i];
return corr + xm1 * (lg_d1 + xm1 * (xnum / xden));
else:
xm2 = y - 1.0;
xden = 1.0;
xnum = 0.0;
            for i in range(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return corr + xm2 * (lg_d2 + xm2 * (xnum / xden));
if (y <= 4.0):
xm2 = y - 2.0;
xden = 1.0;
xnum = 0.0;
        for i in range(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return xm2 * (lg_d2 + xm2 * (xnum / xden));
if y <= 12.0:
xm4 = y - 4.0;
xden = -1.0;
xnum = 0.0;
        for i in range(8):
xnum = xnum * xm4 + lg_p4[i];
xden = xden * xm4 + lg_q4[i];
return lg_d4 + xm4 * (xnum / xden);
assert y <= lg_frtbig
res = lg_c[6];
ysq = y * y;
    for i in range(6):
res = res / ysq + lg_c[i];
res /= y;
corr = math.log(y);
res = res + LOGSQRT2PI - 0.5 * corr;
res += y * (corr - 1.0);
return res | 36128e9a4b765dcc85ef42866fdbe7d16140ea1d | 3,651,058 |
def regional_validity(query_point, regional_inclusion, regional_exclusions):
""" regional_validity
Returns whether a coordinate point is inside a polygon and outside of excluded regions.
Input: A Point object, a Polygon Object of the inclusion region; a list of Polygon Objects of excluded regions.
    Output: True if the query point is both inside the regional polygon and outside all exclusions; False otherwise.
"""
if query_point.within(regional_inclusion):
# Check if the point co-occurs with city areas...
for city in regional_exclusions:
if query_point.within(city):
return False
return True
return False | 68e06b3d89e4783130f123d6c91dc5c43a9788ba | 3,651,059 |
def get_word_vector_list(doc, w2v):
"""Get all the vectors for a text"""
vectors = []
for word in doc:
try:
vectors.append(w2v.wv[word])
except KeyError:
continue
return vectors | f228c2100b6a622fdb677954257e2d1590dcc0ff | 3,651,060 |
def solve(lines, n):
"""Apply the rules specified in the input lines to the starting
pattern for n iterations.
The number of lit pixels in the final pattern is returned.
"""
rules = load_rulebook(lines)
pattern = START
for _ in range(n):
pattern = enhance(pattern, rules)
return sum([row.count('#') for row in pattern]) | 781c349bfa186ac04daea60fe7e954431787ea15 | 3,651,061 |
import matplotlib.colors as mplc
def _to_plotly_color(scl, transparence=None):
"""
converts a rgb color in format (0-1,0-1,0-1) to a plotly color 'rgb(0-255,0-255,0-255)'
"""
    plotly_col = [255 * _c for _c in mplc.to_rgba(scl)]  # to_rgba handles both 3- and 4-component inputs
if transparence is not None:
assert 0. <= transparence <= 1.0
plotly_col[3] = transparence
return "rgba({:.0f}, {:.0f}, {:.0f}, {:.4f})".format(*plotly_col)
else:
return "rgb({:.0f}, {:.0f}, {:.0f})".format(*plotly_col[:3]) | 95b7686f913c69792e18f127176db68a3f72622f | 3,651,062 |
import tensorflow as tf
def dense_attention_block(seqs_repr, is_training, num_layers,
decay_variable, decay_constant,
units, dropout, query_dropout,
                          l2_scale, name=''):
    """
    Stack `num_layers` attention blocks on top of the sequence representation.
    """
    layer_reprs = []  # collect each layer's representation
    for i in range(num_layers):
with tf.variable_scope('dense_attention{}'.format(i), reuse=tf.AUTO_REUSE):
#seqs_repr = tf.Print(seqs_repr, [tf.shape(seqs_repr)], "{}".format(i))
seqs_repr = attention_block(seqs_repr,
is_training,
decay_variable,
decay_constant,
dropout,
query_dropout,
l2_scale)
layer_reprs.append(seqs_repr)
return seqs_repr | db50dd5e4d8d61622a9f989ec0ae9c02c5a4cfe1 | 3,651,063 |
from graphene_django import DjangoObjectType
def generate_schema_type(app_name: str, model: object) -> DjangoObjectType:
"""
Take a Django model and generate a Graphene Type class definition.
Args:
app_name (str): name of the application or plugin the Model is part of.
model (object): Django Model
Example:
For a model with a name of "Device", the following class definition is generated:
Class DeviceType(DjangoObjectType):
Meta:
model = Device
fields = ["__all__"]
        If a FilterSet exists for this model at '<app_name>.filters.<ModelName>FilterSet',
        the filterset will be stored in filterset_class as follows:
Class DeviceType(DjangoObjectType):
Meta:
model = Device
fields = ["__all__"]
filterset_class = DeviceFilterSet
"""
main_attrs = {}
meta_attrs = {"model": model, "fields": "__all__"}
# We'll attempt to find a FilterSet corresponding to the model
# Not all models have a FilterSet defined so the function return none if it can't find a filterset
meta_attrs["filterset_class"] = get_filterset_for_model(model)
main_attrs["Meta"] = type("Meta", (object,), meta_attrs)
schema_type = type(f"{model.__name__}Type", (DjangoObjectType,), main_attrs)
return schema_type | b57cd78cce59dacf1fdb1d14c667405b6cfdcc90 | 3,651,064 |
import flask
def do_authorize():
"""
Send a token request to the OP.
"""
oauth2.client_do_authorize()
try:
redirect = flask.session.pop("redirect")
return flask.redirect(redirect)
except KeyError:
return flask.jsonify({"success": "connected with fence"}) | 1e19f501ac6da94058619e8dd5905d6cd2ab1a69 | 3,651,065 |
import re
from subprocess import check_output
def get_windows():
"""
Return all windows found by WM with CPU, fullscreen, process name, and class information.
"""
# Basic window information
result = check_output('nice -n 19 wmctrl -l -p', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
windows = [re.split(r'\s+', a, maxsplit=4) for a in lines]
# Window properties
window_index = {}
for window in windows:
window_id = window[0]
r = check_output('nice -n 19 xprop -id {}'.format(window_id), shell=True)
wm_classes = []
r_class = re.search(br'WM_CLASS\(STRING\) = (.*)\n', r)
if r_class:
wm_classes = re.findall('\"(.*?)\"', r_class.group(1).decode('ascii'))
fullscreen = b'WM_STATE_FULLSCREEN' in r
window_index[window_id] = (fullscreen, wm_classes)
# Basic process information
usable_lines = []
result = check_output('nice -n 19 top -b -n 2', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
first_found = False
for i, line in enumerate(lines):
r = re.search(r'PID\s+USER\s+PR\s+NI', line)
if r:
if first_found:
usable_lines = lines[i + 1:]
break
else:
first_found = True
processes = [re.split(r'\s+', a.strip()) for a in usable_lines]
process_index = {a[0]: (a[8], a[11]) for a in processes}
result = []
for window in windows:
cpu, name = process_index.get(window[2], (None, None))
        fullscreen, wm_classes = window_index.get(window[0], (None, []))
result.append(Window(*window, cpu=cpu, fullscreen=fullscreen, name=name,
wm_classes=wm_classes))
return result | dd0f6f702592cf7f2fdd8541959682890dcc271e | 3,651,066 |
import google.auth.transport.requests
import google.oauth2._client
def get_google_open_id_connect_token(service_account_credentials):
"""Get an OpenID Connect token issued by Google for the service account.
This function:
1. Generates a JWT signed with the service account's private key
containing a special "target_audience" claim.
2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
has a target_audience claim, that endpoint will respond with
an OpenID Connect token for the service account -- in other words,
a JWT signed by *Google*. The aud claim in this JWT will be
set to the value from the target_audience claim in #1.
For more information, see
https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
The HTTP/REST example on that page describes the JWT structure and
demonstrates how to call the token endpoint. (The example on that page
shows how to get an OAuth2 access token; this code is using a
modified version of it to get an OpenID Connect token.)
"""
service_account_jwt = (
service_account_credentials._make_authorization_grant_assertion())
request = google.auth.transport.requests.Request()
body = {
'assertion': service_account_jwt,
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
}
token_response = google.oauth2._client._token_endpoint_request(
request, OAUTH_TOKEN_URI, body)
return token_response['id_token'] | 08e483865d26772112ffaf9837692f001598ced5 | 3,651,067 |
def term_to_atoms(terms):
"""Visitor to list atoms in term."""
if not isinstance(terms, list):
terms = [terms]
new_terms = []
for term in terms:
if isinstance(term, And):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Or):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Not):
new_terms.append(term.child)
else:
new_terms.append(term)
return new_terms | 6262ea7b1df124a4717d1452a23c33175b5da7a8 | 3,651,068 |
def expr_max(argv):
"""
Max aggregator function for :class:`Expression` objects
Returns
-------
exp : :class:`Expression`
Max of given arguments
Examples
--------
>>> x = so.VariableGroup(10, name='x')
>>> y = so.expr_max(2*x[i] for i in range(10))
"""
return expr_nested(argv, 'max') | 182157a627b12db6c41c79a99f135a7a493d4410 | 3,651,069 |
def handle_size(bytes_in=False, bytes_out=False):
"""
a function that converts bytes to human readable form. returns a
string like: 42.31 TB. example:
your_variable_name = make_readable(value_in_bytes)
"""
tib = 1024 ** 4
gib = 1024 ** 3
mib = 1024 ** 2
kib = 1024
if bytes_in:
data = float(bytes_in)
if data >= tib:
symbol = 'TB'
new_data = data / tib
elif data >= gib:
symbol = 'GB'
new_data = data / gib
elif data >= mib:
symbol = 'MB'
new_data = data / mib
elif data >= kib:
symbol = 'KB'
new_data = data / kib
elif data >= 0:
symbol = ' B'
new_data = data
formated_data = "{0:.2f}".format(new_data)
converted_data = str(formated_data) + symbol
return converted_data
elif bytes_out:
symbol = bytes_out[-1].lower()
data = bytes_out[0:-1]
try:
bytes = int(data)
except Exception as e:
print("couldnt convert " + data + " to int!")
print(e)
exit()
if symbol == 't':
converted_data = bytes * tib
elif symbol == 'g':
converted_data = bytes * gib
elif symbol == 'm':
converted_data = bytes * mib
elif symbol == 'k':
converted_data = bytes * kib
else:
print("unsupported size type! expected t, g, m, or k!")
exit()
return converted_data | 6e2b3b758e1afc1cea43bbe7ac0c6179b1d32c5f | 3,651,070 |
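A usage sketch of the converter above in both directions (the values are arbitrary):

```python
print(handle_size(bytes_in=5368709120))  # -> '5.00GB'
print(handle_size(bytes_out='5g'))       # -> 5368709120
```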
def return_elapsed(gs):
"""Returns a description of the elapsed time of recent operations.
Args:
gs: global state.
Returns:
A dictionary containing the count, minimum elapsed time,
maximum elapsed time, average elapsed time, and list of elapsed time
records.
"""
assert isinstance(gs, global_state.GlobalState)
elapsed_list = []
elapsed_sum = 0.0
elapsed_min = None
elapsed_max = None
for elapsed_record in gs.get_elapsed():
duration = elapsed_record.elapsed_seconds
elapsed_list.append(
{'start_time': utilities.seconds_to_timestamp(
elapsed_record.start_time),
'what': elapsed_record.what,
'threadIdentifier': elapsed_record.thread_identifier,
'elapsed_seconds': duration})
elapsed_sum += duration
if (elapsed_min is None) or (elapsed_max is None):
elapsed_min = duration
elapsed_max = duration
else:
elapsed_min = min(elapsed_min, duration)
elapsed_max = max(elapsed_max, duration)
return {'count': len(elapsed_list),
'min': elapsed_min,
'max': elapsed_max,
'average': elapsed_sum / len(elapsed_list) if elapsed_list else None,
'items': elapsed_list} | af832a3bac239e24f610e39c5dee8fde6a1a25c8 | 3,651,071 |
import numpy as np
def calculate_per_class_lwlrap(truth, scores):
"""Calculate label-weighted label-ranking average precision.
Arguments:
truth: np.array of (num_samples, num_classes) giving boolean ground-truth
of presence of that class in that sample.
scores: np.array of (num_samples, num_classes) giving the classifier-under-
test's real-valued score for each class for each sample.
Returns:
per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each
class.
weight_per_class: np.array of (num_classes,) giving the prior of each
class within the truth labels. Then the overall unbalanced lwlrap is
simply np.sum(per_class_lwlrap * weight_per_class)
"""
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
# Space to store a distinct precision value for each class on each sample.
# Only the classes that are true for each sample will be filled in.
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = (
_one_sample_positive_class_precisions(scores[sample_num, :],
truth[sample_num, :]))
precisions_for_samples_by_classes[sample_num, pos_class_indices] = (
precision_at_hits)
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
# Form average of each column, i.e. all the precisions assigned to labels in
# a particular class.
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
# overall_lwlrap = simple average of all the actual per-class, per-sample precisions
# = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0)
# also = weighted mean of per-class lwlraps, weighted by class label prior across samples
# = np.sum(per_class_lwlrap * weight_per_class)
return per_class_lwlrap, weight_per_class | 7cc9187f96d0899d0ce554164df553cc9b5f79a0 | 3,651,072 |
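# Hedged usage sketch (illustration only): assumes numpy is imported as np and
# that the helper _one_sample_positive_class_precisions from the original
# module is available alongside calculate_per_class_lwlrap.
truth_demo = np.array([[1, 0, 1], [0, 1, 0]])                 # 2 samples, 3 classes
scores_demo = np.array([[0.9, 0.2, 0.3], [0.1, 0.8, 0.4]])
per_class_demo, weight_demo = calculate_per_class_lwlrap(truth_demo, scores_demo)
overall_lwlrap_demo = np.sum(per_class_demo * weight_demo)   # weighted mean over classes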
from typing import Tuple
def chan_faces(n1: int, n2: int, xform,
dim1: Tuple[float, float, float, float],
dim2: Tuple[float, float, float, float]):
"""
^y
| 0--------7
| | |
| | 5-----6
| | |
+--|--|-------> z
| |
| 4-----3
| |
1--------2
<----> e/zsc
<--------> bflange
"""
faces = [
# front face
[0, 5, 6, 7],
[0, 1, 4, 5],
[1, 2, 3, 4],
# back face
[8, 13, 14, 15],
[8, 9, 12, 13],
[9, 10, 11, 12],
# around the C (counter-clockwise)
[0, 8, 9, 1],
[1, 9, 10, 2],
[2, 10, 11, 3],
[3, 11, 12, 4],
[4, 12, 13, 5],
[5, 13, 14, 6],
[6, 14, 15, 7],
[7, 15, 8, 0],
]
points_list = []
for nid, dim in [(n1, dim1), (n2, dim2)]:
bflange, hall, tweb, tflange = dim
# distance from shear center to neutral axis
#zsc_na = 3 * bflange ** 2 / (6 * bflange + h) # per msc 2018 refman
zsc = 0. ## TODO: consider the shear center
#if 0: # pragma: no cover
# msc 2018 refman; p.670
#h = hall - tflange
#tw = tweb
#tf = tflange
#b = bflange - tw / 2.
#bf = bflange - tw
#hw = hall - 2. * tf
#A = 2 * tf * bf + (h + tf) * tw
#zc = bf * tf * (bf + tw) / A
#zsc = b**2 * tf / (2*b*tw + 1/3. * h * tf)
#E = zs - tw / 2.
#zna = zc + zsc
points = np.array([
[0., hall/2, zsc], # 0
[0., -hall/2, zsc], # 1
[0., -hall/2, zsc + bflange], # 2
[0., -hall/2 + tflange, zsc + bflange], # 3
[0., -hall/2 + tflange, zsc + tweb], # 4
[0., hall/2 - tflange, zsc + tweb], # 5
[0., hall/2 - tflange, zsc + bflange], # 6
[0., hall/2, zsc + bflange], # 7
        ])  # 8 x 3 per node (16 x 3 after stacking both ends)
pointsi = points @ xform + nid
points_list.append(pointsi)
return faces, np.vstack(points_list) | eb1b67dc3e0700adf1df83c7400e6fb076e0c3dd | 3,651,073 |
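# Hedged usage sketch (illustration only): builds the faces/points for two
# identical CHAN cross sections using an identity transform; the node inputs
# are treated as simple numeric offsets, as the broadcasting above allows.
import numpy as np

demo_dims = (2.0, 4.0, 0.2, 0.3)  # (bflange, hall, tweb, tflange)
demo_faces, demo_points = chan_faces(0, 10, np.eye(3), demo_dims, demo_dims)
assert demo_points.shape == (16, 3)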
import hashlib
import zipfile
from datetime import datetime
def generate_manifest(name, p, h=None):
""" generate_manifest(name, p, h) -> mapping
Generates a mapping used as the manifest file.
:param name: a dotted package name, as in setup.py
:param p: the zip file with package content.
:param h: optional hash function to use.
    :returns: the manifest as a mapping with the file hashes, package name,
        overall checksum, and creation date.
"""
if h is None:
h = hashlib.sha256
m = {}
fh = m["files"] = {}
order = []
with zipfile.ZipFile(p) as zf:
for fi in zf.filelist:
order.append(fi.filename)
hash_all = h()
for fn in sorted(order):
contents = zf.read(fn)
hash_all.update(contents)
fh[fn] = h(contents).hexdigest()
m["name"] = name
m["sum"] = hash_all.hexdigest()
m["date"] = datetime.datetime.now().isoformat()
return m | 13c10ae405dbc6fe5acf92180e7981d07fdb9c60 | 3,651,074 |
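# Hedged usage sketch (illustration only): writes a tiny zip archive and builds
# a manifest for it; the archive and member names here are made up for the example.
import zipfile

with zipfile.ZipFile("demo_pkg.zip", "w") as zf:
    zf.writestr("demo_pkg/__init__.py", "VERSION = '0.1'\n")
manifest = generate_manifest("demo_pkg", "demo_pkg.zip")
print(manifest["name"], manifest["sum"][:8], sorted(manifest["files"]))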
def benchrun(methods,
model,
case_args,
filename,
cpus=1,):
"""
Parameters
----------
methods : list of str
Voter systems to be assessed by the election model.
model : func
Election model running function as
>>> e = func(**kwargs)
Where
- `e` is an Election object
- `kwargs` is a dict of arguments generated by `case_args`
case_args : generator
Generator that creates the parametric arguments to input into the model.
Must accept argument `methods` --
>>> generator = case_args(methods)
>>> args = next(generator)
filename : str
Naming prefix for output files
cpus : int
Number of processes or CPU's to use
Returns
-------
df : Dataframe
Results for every election iteration assessed
"""
b = _BenchRunner(model=model, case_args=case_args, filename=filename)
if cpus > 1:
return b.runmult(methods, cpus=cpus)
else:
return b.run(methods) | 414c96deb9a8d2f64b6808323465f8647aa5e48a | 3,651,075 |
import logging
from time import sleep
def retry(exceptions, times=3, sleep_second=0):
    """
    Retry Decorator
    Retries the wrapped function/method `times` times if the exceptions listed
    in ``exceptions`` are thrown
    :param exceptions: Exception class, or list/tuple of exception classes, that triggers a retry attempt
    :param times: The number of times to repeat the wrapped function/method
    :type times: Int
    :param sleep_second: Seconds to sleep between attempts (0 disables sleeping)
    """
    if not isinstance(exceptions, (list, tuple, set)):
        exceptions = [exceptions]
def decorator(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
return func(*args, **kwargs)
except Exception as e:
for i in exceptions:
if isinstance(e,i):
                        logging.warning(
                            'Exception thrown when attempting to run %s, attempt '
                            '%d of %d' % (func, attempt, times),
                            exc_info=True
                        )
attempt += 1
if sleep_second:sleep(sleep_second)
break
else:#when no break
raise e
return func(*args, **kwargs)
return newfn
return decorator | de715a0f903386358265c3fe4a13f1d91bcb177e | 3,651,076 |
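# Hedged usage sketch (illustration only): retries a flaky callable up to three
# times, sleeping one second between attempts.
_calls = {"n": 0}

@retry(ValueError, times=3, sleep_second=1)
def sometimes_fails():
    _calls["n"] += 1
    if _calls["n"] < 3:
        raise ValueError("transient failure")
    return "ok"

assert sometimes_fails() == "ok"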
def positiveId(obj):
"""Return id(obj) as a non-negative integer."""
result = id(obj)
if result < 0:
result += _address_mask
assert result > 0
return result | 5d3f987c621cf3d43ac31e9300a4d54ba208a7a0 | 3,651,077 |
import zipfile
import os
def get_vroitems_from_package(package):
"""Get all the items from the vRO Package.
Args:
package (str): Path to a package file.
Returns:
VROElementMetadata[]: a list of VROElementMetadata.
"""
vro_items_id, vro_items = [], []
with zipfile.ZipFile(package, 'r') as zip_ref:
for x in zip_ref.namelist():
if x.startswith("elements"):
item_id = os.path.basename(os.path.split(x)[0])
if item_id not in vro_items_id:
with zip_ref.open('elements/' + item_id + '/info', 'r') as xml_info_file:
xml_info = xml_info_file.read()
with zip_ref.open('elements/' + item_id + '/data', 'r') as data_file:
data = data_file.read()
vro_item = VROElementMetadata(item_id, xml_info, data)
vro_items.append(vro_item)
vro_items_id.append(item_id)
logger.info("New item %s" % vro_item)
return vro_items | 9e9257094f7da00da057dcb126b0355f2117281d | 3,651,078 |
def compute_annualized_total_return_over_months(df, column_price, months):
"""
Computed the annualized total return over the specified number of months.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:param months: time period in months (e.g. 1 = 1 month, 2 = 2 months, 2.5 = 1 month and ~15 days, etc.)
:return: annualized total return over months
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# calculate annualized total returns over months
annualized_total_return = ((1 + total_return)**(12/months)) - 1
return annualized_total_return | a75886ae85ab5bb146d93bd159a0a2a32f950678 | 3,651,079 |
from functools import reduce
def build_sparse_ts_from_distributions(start_date, end_date, seasonalities, time_interval, dist_dict, **kwargs):
"""constructs a time series with given distributions and seasonalities in a given frequency time_interval"""
ts_list = []
for (name, dist), seasonality in zip(dist_dict.items(), seasonalities):
ts_list.append(build_sparse_ts_by_seasonality(dist, start_date, end_date, seasonality, time_interval,
**kwargs.get(name, {})))
ts = reduce(lambda x, y: add_ts_with_different_dates(x, y), ts_list) # add time series together
return ts | 81d2ebc32a2b62ed967377faf90b3b58e7c753ff | 3,651,080 |
def preprocess_label(labels, scored_classes, equivalent_classes):
""" convert string labels to binary labels """
y = np.zeros((len(scored_classes)), np.float32)
for label in labels:
if label in equivalent_classes:
label = equivalent_classes[label]
if label in scored_classes:
y[scored_classes.index(label)] = 1
return y | 3e2465bb0db04afaaca0576f6c97847bd0fd2b2e | 3,651,081 |
from typing import Iterable
import os
from typing import cast
import platform
import stat
def compile_on_disk(source_file: str,
parser_name: str = '',
compiler_suite: str = "",
extension: str = ".xml") -> Iterable[Error]:
"""
Compiles the a source file with a given compiler and writes the
result to a file.
If no ``compiler_suite`` is given it is assumed that the source
file is an EBNF grammar. In this case the result will be a Python
script containing a parser for that grammar as well as the
skeletons for a preprocessor, AST transformation table, and compiler.
If the Python script already exists only the parser name in the
script will be updated. (For this to work, the different names
need to be delimited section marker blocks.). `compile_on_disk()`
returns a list of error messages or an empty list if no errors
occurred.
:param source_file: The file name of the source text to be compiled.
:param parser_name: The name of the generated parser. If the empty
string is passed, the default name "...Parser.py" will be used.
:param compiler_suite: The file name of the parser/compiler-suite
(usually ending with 'Parser.py'), with which the source file
shall be compiled. If this is left empty, the source file is
assumed to be an EBNF-Grammar that will be compiled with the
internal EBNF-Compiler.
:param extension: The result of the compilation (if successful)
is written to a file with the same name but a different extension
than the source file. This parameter sets the extension.
:returns: A (potentially empty) list of error or warning messages.
"""
filepath = os.path.normpath(source_file)
rootname = os.path.splitext(filepath)[0]
if not parser_name: parser_name = rootname + 'Parser.py'
f = None # Optional[TextIO]
with open(source_file, encoding="utf-8") as f:
source = f.read()
# dhpath = relative_path(os.path.dirname(rootname), DHPARSER_PARENTDIR)
compiler_name = as_identifier(os.path.basename(rootname))
if compiler_suite:
sfactory, pfactory, tfactory, cfactory = load_compiler_suite(compiler_suite)
compiler1 = cfactory()
else:
sfactory = get_ebnf_preprocessor # PreprocessorFactoryFunc
pfactory = get_ebnf_grammar # ParserFactoryFunc
tfactory = get_ebnf_transformer # TransformerFactoryFunc
cfactory = get_ebnf_compiler # CompilerFactoryFunc
compiler1 = cfactory() # Compiler
is_ebnf_compiler = False # type: bool
if isinstance(compiler1, EBNFCompiler):
is_ebnf_compiler = True
compiler1.set_grammar_name(compiler_name, source_file)
result, messages, _ = compile_source(source, sfactory(), pfactory(), tfactory(), compiler1)
if has_errors(messages):
return messages
elif is_ebnf_compiler:
# trans == get_ebnf_transformer or trans == EBNFTransformer:
# either an EBNF- or no compiler suite given
ebnf_compiler = cast(EBNFCompiler, compiler1) # type: EBNFCompiler
global SECTION_MARKER, RX_SECTION_MARKER, PREPROCESSOR_SECTION, PARSER_SECTION, \
AST_SECTION, COMPILER_SECTION, END_SECTIONS_MARKER, RX_WHITESPACE
f = None
try:
f = open(parser_name, 'r', encoding="utf-8")
source = f.read()
sections = split_source(parser_name, source)
intro, imports, preprocessor, _, ast, compiler, outro = sections
ast_trans_python_src = imports + ast
ast_trans_table = dict() # type: TransformationDict
try:
ast_trans_table = compile_python_object(ast_trans_python_src,
r'(?:\w+_)?AST_transformation_table$')
except Exception as e:
if isinstance(e, NameError):
err_str = 'NameError "{}" while compiling AST-Transformation. ' \
'Possibly due to a forgotten import at the beginning ' \
'of the AST-Block (!)'.format(str(e))
else:
err_str = 'Exception {} while compiling AST-Transformation: {}' \
.format(str(type(e)), str(e))
messages.append(Error(err_str, 0, CANNOT_VERIFY_TRANSTABLE_WARNING))
if is_logging():
with open(os.path.join(log_dir(), rootname + '_AST_src.py'), 'w',
encoding='utf-8') as f:
f.write(ast_trans_python_src)
messages.extend(ebnf_compiler.verify_transformation_table(ast_trans_table))
# TODO: Verify compiler
except (PermissionError, FileNotFoundError, IOError):
intro, imports, preprocessor, _, ast, compiler, outro = '', '', '', '', '', '', ''
finally:
if f:
f.close()
f = None
if RX_WHITESPACE.fullmatch(intro):
intro = '#!/usr/bin/env python3'
if RX_WHITESPACE.fullmatch(outro):
outro = read_template('DSLParser.pyi').format(NAME=compiler_name)
if RX_WHITESPACE.fullmatch(imports):
imports = DHParser.ebnf.DHPARSER_IMPORTS
if RX_WHITESPACE.fullmatch(preprocessor):
preprocessor = ebnf_compiler.gen_preprocessor_skeleton()
if RX_WHITESPACE.fullmatch(ast):
ast = ebnf_compiler.gen_transformer_skeleton()
if RX_WHITESPACE.fullmatch(compiler):
compiler = ebnf_compiler.gen_compiler_skeleton()
try:
f = open(parser_name, 'w', encoding="utf-8")
f.write(intro)
f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
f.write(imports)
f.write(SECTION_MARKER.format(marker=PREPROCESSOR_SECTION))
f.write(preprocessor)
f.write(SECTION_MARKER.format(marker=PARSER_SECTION))
f.write(cast(str, result))
f.write(SECTION_MARKER.format(marker=AST_SECTION))
f.write(ast)
f.write(SECTION_MARKER.format(marker=COMPILER_SECTION))
f.write(compiler)
f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
f.write(outro)
except (PermissionError, FileNotFoundError, IOError) as error:
print(f'# Could not write file "{parser_name}" because of: '
+ "\n# ".join(str(error).split('\n)')))
print(result)
finally:
if f:
f.close()
if platform.system() != "Windows":
# set file permissions so that the parser_name can be executed
st = os.stat(parser_name)
os.chmod(parser_name, st.st_mode | stat.S_IEXEC)
else:
f = None
try:
f = open(rootname + extension, 'w', encoding="utf-8")
if isinstance(result, Node):
if extension.lower() == '.xml':
f.write(result.as_xml())
else:
f.write(result.as_sxpr())
elif isinstance(result, str):
f.write(result)
else:
raise AssertionError('Illegal result type: ' + str(type(result)))
except (PermissionError, FileNotFoundError, IOError) as error:
print('# Could not write file "' + rootname + '.py" because of: '
+ "\n# ".join(str(error).split('\n)')))
print(result)
finally:
if f:
f.close()
return messages | 39ec45c2dc65bd9e289b7fd8726f2efd6a128085 | 3,651,082 |
import logging
import yaml
def load_settings(filename='settings.yaml'):
"""Read settings from a file.
Keyword arguments:
filename -- the source file (default settings.yaml)
"""
with open(filename, 'r') as settings_yaml:
logging.debug("Reading settings from file: %s", filename)
        return yaml.safe_load(settings_yaml) | 42a983d048b79f2ed5e8744ec32486b44c8f8a82 | 3,651,083
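# Hedged usage sketch (illustration only): reads the default settings.yaml from
# the working directory; the "debug" key looked up here is made up for the example.
settings = load_settings()
debug_enabled = settings.get("debug", False)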
import math
def vec_abs(field: SampledField):
""" See `phi.math.vec_abs()` """
if isinstance(field, StaggeredGrid):
field = field.at_centers()
return field.with_values(math.vec_abs(field.values)) | 91395513b7e457bdfdded484db1069e8c3b95805 | 3,651,084 |
def spoofRequest(app):
"""
Make REQUEST variable to be available on the Zope application server.
This allows acquisition to work properly
"""
_policy=PermissiveSecurityPolicy()
_oldpolicy=setSecurityPolicy(_policy)
newSecurityManager(None, OmnipotentUser().__of__(app.acl_users))
info = {'SERVER_NAME': 'isaw4.atlantides.org',
'SERVER_PORT': '8083',
'REQUEST_METHOD': 'GET'}
return makerequest(app, environ=info) | d1b3bd1a37d69f6500d23e55b5318b6519ed04be | 3,651,085 |
def data_to_percentage(data_list: pd.DataFrame) -> pd.DataFrame:
"""
Takes a dataframe with one or more columns filled with digits and returns a
dataframe with the percentages corresponding to the number of times the
numbers 1-9 appear in each column.
Args:
data_list: a dataframe of integers representing all of the leading
digits from a dataset (in this case, the number of vote counts).
Each columns is a category and is a Series with digits.
Returns:
returns a dataframe of Series with the percentages of each column that
are each unique number in that column. Any numbers outside of [1, 9] are
not included and any column with fewer unique digits than another column
is dropped.
"""
def per_column_percentage(column: pd.Series) -> pd.Series:
number_of_occurrences = column.value_counts()
number_of_occurrences = number_of_occurrences[
(number_of_occurrences.index > 0)
& (number_of_occurrences.index < 10)
]
return number_of_occurrences.multiply(
100 / sum(number_of_occurrences)
).sort_index()
return data_list.apply(per_column_percentage).dropna(axis=1) | 18316ddf999419290d572e77d2934241359e45a3 | 3,651,086 |
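# Hedged usage sketch (illustration only): leading digits for two made-up
# categories; each returned column holds the percentage of each digit 1-9.
import pandas as pd

demo_digits = pd.DataFrame({
    "county_a": [1, 1, 2, 3, 1, 9],
    "county_b": [2, 9, 3, 1, 1, 1],
})
demo_pct = data_to_percentage(demo_digits)
# e.g. demo_pct["county_a"][1] == 50.0 (three of the six leading digits are 1)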
from .models.models import EncoderClassifier
import torch
def create_classifier_from_encoder(data:DataBunch, encoder_path:str=None, path=None,
dropout1=0.5, device: torch.device = torch.device('cuda', 0), **kwargs):
"""Factory function to create classifier from encoder to allow transfer learning."""
path = data.path if path is None else path
if encoder_path is None:
logger.info("WARNING: `encoder_path` is None, not using pretrained feature extractor")
encoder = None
else:
encoder = torch.load(encoder_path, map_location='cpu')
model = EncoderClassifier(data.train_ds.shape, encoder, len(data.classes),dropout1=dropout1)
learn = Learner(data, model, path, model_type="classifier", device=device, **kwargs)
learn.freeze_encoder()
return learn | 736cfec768b6d659ab6fa1f087474a482409b66e | 3,651,087 |
from typing import Hashable
def filter_string(
df: pd.DataFrame,
column_name: Hashable,
search_string: str,
complement: bool = False,
case: bool = True,
flags: int = 0,
na=None,
regex: bool = True,
) -> pd.DataFrame:
"""Filter a string-based column according to whether it contains a substring.
This is super sugary syntax that builds on top of `pandas.Series.str.contains`.
It is meant to be the method-chaining equivalent of the following:
```python
df = df[df[column_name].str.contains(search_string)]]
```
This method does not mutate the original DataFrame.
Example: Retain rows whose column values contain a particular substring.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"a": range(3, 6), "b": ["bear", "peeL", "sail"]})
>>> df
a b
0 3 bear
1 4 peeL
2 5 sail
>>> df.filter_string(column_name="b", search_string="ee")
a b
1 4 peeL
>>> df.filter_string(column_name="b", search_string="L", case=False)
a b
1 4 peeL
2 5 sail
Example: Filter names does not contain `'.'` (disable regex mode).
>>> import pandas as pd
>>> import janitor
>>> df = pd.Series(["JoseChen", "Brian.Salvi"], name="Name").to_frame()
>>> df
Name
0 JoseChen
1 Brian.Salvi
>>> df.filter_string(column_name="Name", search_string=".", regex=False, complement=True)
Name
0 JoseChen
:param df: A pandas DataFrame.
:param column_name: The column to filter. The column should contain strings.
:param search_string: A regex pattern or a (sub-)string to search.
:param complement: Whether to return the complement of the filter or not. If
set to True, then the rows for which the string search fails are retained
instead.
:param case: If True, case sensitive.
:param flags: Flags to pass through to the re module, e.g. re.IGNORECASE.
:param na: Fill value for missing values. The default depends on dtype of
the array. For object-dtype, `numpy.nan` is used. For `StringDtype`,
`pandas.NA` is used.
:param regex: If True, assumes `search_string` is a regular expression. If False,
treats the `search_string` as a literal string.
:returns: A filtered pandas DataFrame.
""" # noqa: E501
criteria = df[column_name].str.contains(
pat=search_string,
case=case,
flags=flags,
na=na,
regex=regex,
)
if complement:
return df[~criteria]
return df[criteria] | 9e5598a4afcff41ec5dc67c38b68efbacf3f09ec | 3,651,088 |
from typing import List
from typing import Dict
from typing import Union
import functools
def on_demand_feature_view(
features: List[Feature], inputs: Dict[str, Union[FeatureView, RequestDataSource]]
):
"""
Declare an on-demand feature view
:param features: Output schema with feature names
:param inputs: The inputs passed into the transform.
:return: An On Demand Feature View.
"""
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
inputs=inputs,
features=features,
udf=user_function,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator | 0ea45df22cb167ad2aa919a0be40f2a11574a69a | 3,651,089 |
from typing import cast
def get_error_string(ftdi):
"""
get_error_string(context ftdi) -> char *
Get string representation for last error code
Parameters:
-----------
ftdi: pointer to ftdi_context
Returns:
--------
Pointer: to error string
"""
errstr = ftdi_get_error_string(ftdi)
return cast(errstr, c_char_p).value.decode('ascii') | e0d3eaa19014fff9840e7a8e629651107ae25495 | 3,651,090 |
def DenseNet52k12(growth_rate = 12,
reduction = 0.5):
"""
Parameters:
----------
Returns
-------
"""
return DenseNet(reduction = reduction,
growth_rate = growth_rate,
layers=52) | a295bcae685ae36bcbe356099d404403f7b8c0b6 | 3,651,091 |
def construct_fid_mask(catalog):
"""
Constructs the fidelity mask based off my results, not Robertos
:param catalog:
:return:
"""
line_widths = [i for i in range(3, 21, 2)]
fid_catalog = load_table("fidelity_snr.out", start=0)
fid_limit = 0.4
six_fids = []
for width in line_widths:
f = interp1d(fid_catalog["fbin"], fid_catalog["pure{}".format(width)], kind='slinear')
xdata = np.linspace(5.85, 7.85, 10000)
six_fids.append(xdata[np.argmax(f(xdata) >= fid_limit)])
masks = []
line_widths = [i for i in range(3, 21, 2)]
#six_fids = [6.3, 6.2, 6.1, 6.15, 6.1, 6.20, 6.1, 6.20, 6.05]
# six_fids = [6.35, 6.25, 6.15, 6.15, 6.15, 6.25, 6.15, 6.25, 6.05]
# six_fids = [6.25, 6.2, 6.1, 6.1, 6.1, 6.15, 6.1, 6.15, 6.05]
for index, width in enumerate(line_widths):
print(six_fids[index])
masks.append(catalog[((catalog['width'] == width) & (catalog['rsnrrbin'] >= six_fids[index]))])
total = masks[0]
t_sum = 0
for mask in masks[1:]:
t_sum += len(mask)
total = vstack((total, mask))
print("Total One: {}".format(len(total)))
return total | 81f50ae4dd092482eb406bef331075245989d2f3 | 3,651,092 |
import os
import sys
def get_template(filename):
"""
return html mail template
"""
current_dir = os.path.dirname(__file__)
tpl = read_file(os.path.join(current_dir,'templates',filename))
if not tpl:
_log('Mailer error: could not load file "%s"'%filename)
sys.exit(1)
return tpl | 1a253d02c6fc90e13088d530c8e02272c8bcc28c | 3,651,093 |
def _run_job(tgt, fun, arg, kwarg, tgt_type, timeout, retry):
"""
Helper function to send execution module command using ``client.run_job``
method and collect results using ``client.get_event_iter_returns``. Implements
basic retry mechanism.
If ``client.get_event_iter_returns`` return no results, ``_run_job`` will retry
the command until minions return results or ``retry`` threshold reached, in
latter case ``CommandExecutionError`` raised with job details
"""
ret = {}
attempt = 1
while attempt <= retry:
# publish job command
pub_data = client.run_job(
tgt=tgt, fun=fun, arg=arg, kwarg=kwarg, tgt_type=tgt_type, timeout=timeout
)
# collect job results
job_results = client.get_event_iter_returns(timeout=timeout, **pub_data)
for item in job_results:
ret.update(item)
if not set(pub_data["minions"]) == set(ret.keys()):
minions_no_return = set(pub_data["minions"]) - set(ret.keys())
log.warning(
"Nornir-runner:_run_job - {}s timeout; no results from {}; returned {}; jid {}; attempt: {}".format(
timeout,
list(minions_no_return),
list(ret.keys()),
pub_data["jid"],
attempt,
)
)
if ret:
break
attempt += 1
else:
raise CommandExecutionError(
"Nornir-runner:_run_job - no results from minions; tgt: {}; fun: {}; tgt_type: {}; timeout: {}; retry: {}; kwarg: {}".format(
tgt, fun, tgt_type, timeout, retry, kwarg
)
)
return ret | e23c189063e5d7df542d8e774acf655f4af61289 | 3,651,094 |
def _set_rank_colorbar(ax, img, norm):
""" Set color bar for rankshow on the right of the ax
"""
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(img, cax=cax)
y_tick_values = cax.get_yticks()
boundary_means = [np.mean((y_tick_values[ii],y_tick_values[ii-1]))
for ii in range(1, len(y_tick_values))]
print(norm.boundaries)
category_names = [(str(norm.boundaries[ii-1])+'~'+
str(norm.boundaries[ii]))
for ii in range(1, len(norm.boundaries))]
# category_names[0] = '<='+str(norm.boundaries[1])
category_names[-1] = '>'+str(norm.boundaries[-2])
cax.yaxis.set_ticks(boundary_means)
cax.yaxis.set_ticklabels(category_names,rotation=0)
return cax | f21f59ac69c79cf9449d28710abdeeb730004077 | 3,651,095 |
from typing import Optional
from pathlib import Path
import importlib.util
def destination(stub: str) -> Optional[Path]:
"""Determine stub path
Only handle micropython stubs, ignoring
any cPython stdlib equivalents.
"""
prefix, _, suffix = stub.partition(".")
if importlib.util.find_spec(prefix): # type: ignore
return # in cPython stdlib, skip
prefix = Path(prefix)
if suffix in ("py", "pyi"): # module
return prefix / f"__init__.{suffix}"
return prefix / suffix | 8b2552513dbeaa9dc09cb85703b736e17c4788b5 | 3,651,096 |
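# Hedged usage sketch (illustration only): stub names that resolve in the local
# CPython stdlib are skipped (None); everything else is mapped to a stub path.
print(destination("os.py"))         # None -> handled by the cPython stdlib
print(destination("machine.py"))    # machine/__init__.py (assuming no local 'machine' package)
print(destination("umachine.ADC"))  # umachine/ADC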
from os.path import join, isfile
from wpylib.sugar import is_iterable
from wpylib.file.file_utils import list_dir_entries
from pyqmc.results.gafqmc_info import is_gafqmc_info
def is_gafqmc_result_dir(D, files=None, dirs=None,
file_pattern=None, parse_file=True):
"""Tests whether the directory D, containing `files' (and softlinks)
and directories `dirs' is a result directory for a GAFQMC-type
calculation.
Returns the score of the test, where higher score means more
reliability.
Input arguments `files' and `dirs' are not necessary (in fact, not
recommended) unless you use this in conjunction with os.walk,
where the files and dirs would have been gathered during the
iteration cycle.
Return flag: an integer or-ed
1 = output file name exists
2 = AND output file does exists as a regular file
4 = AND output file is indeed a GAFQMC output file
8 = AND input filename exists that matches the output
"""
if files == None or dirs == None:
dirs, files = list_dir_entries(D)[:2]
rslt = 0
if file_pattern == None:
file_pattern = gafqmc_out_file_patterns
if isinstance(file_pattern, (set, tuple, list)) or is_iterable(file_pattern):
if not isinstance(file_pattern, set):
file_pattern = set(file_pattern)
fset = set(files)
fset_good = file_pattern & fset
if len(fset_good) > 0:
# WARNING: This will create uncertainty if there are more than one file
# matching the pattern. BE WARNED!
info_file = sorted(list(fset_good))[0]
rslt |= 1
else:
raise NotImplementedError
if rslt:
# At least the filename is found:
info_path = join(D, info_file)
if isfile(info_path):
rslt |= 2
if parse_file and is_gafqmc_info(info_path):
rslt |= 4
# the next if's are TO BE IMPLEMENTED LATER
return rslt | 938584931f4b1064dd2101e6230a134b9ce90a0b | 3,651,097 |
def train_IPCA(X,n_dims,batch_size,model='ipca'):
"""
name: train_IPCA
Linear dimensionality reduction using Singular Value Decomposition of
centered data, keeping only the most significant singular vectors to
project the data to a lower dimensional space.
returns: the transformer model
"""
estimator=transformer[model].set_params(pca__n_components=n_dims,pca__batch_size=batch_size)
estimator.fit(X)
return estimator | 282c885a562b5b3dbe356050ef5f270f49d7014d | 3,651,098 |
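# Hedged usage sketch (illustration only): the snippet above relies on a
# module-level `transformer` registry that is not shown here; one plausible
# definition is a scikit-learn Pipeline whose 'pca' step is an IncrementalPCA.
import numpy as np
from sklearn.decomposition import IncrementalPCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

transformer = {'ipca': Pipeline([('scaler', StandardScaler()),
                                 ('pca', IncrementalPCA())])}  # assumed registry layout
X_demo = np.random.rand(500, 64)
ipca_model = train_IPCA(X_demo, n_dims=16, batch_size=100)
X_reduced = ipca_model.transform(X_demo)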
def _str_cell(cell: Cell) -> str:
"""Строковое представление клетки.
Данной строкой клетка будет выводится на экран.
"""
if cell.is_open:
if cell.is_empty:
return " "
elif cell.value:
return f" {cell.value} "
elif cell.is_flagged:
return "[F]"
else:
return "[ ]" | 2e4428196601a726b488e3ec4d966072033c5bfe | 3,651,099 |