content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---
def height(grid):
"""Gets the height of the grid (stored in row-major order)."""
return len(grid)
|
b90bdb029518cfdaaa4bf93dd77b8996e646b322
| 706,125 |
import re
def numericalSort(value):
"""
複数ファイルの入力の際、ファイル名を昇順に並べる。
Input
------
value : 読み込みたいファイルへのパス
Output
------
parts : ファイル中の数字
"""
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
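# Illustrative usage (hypothetical filenames): using the split parts as a sort
# key yields natural ordering, so "file2" sorts before "file10".
files = sorted(["file10.txt", "file2.txt", "file1.txt"], key=numericalSort)
assert files == ["file1.txt", "file2.txt", "file10.txt"]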
|
1fc8c748b37a89fe9ea3fb0283b5ec8012781028
| 706,127 |
def add_to_list(str_to_add, dns_names):
"""
This will add a string to the dns_names array if it does not exist.
It will then return the index of the string within the Array
"""
if str_to_add not in dns_names:
dns_names.append(str_to_add)
return dns_names.index(str_to_add)
|
4720708778fccc7a16dc66ad52ec911a5acb1f94
| 706,128 |
import inspect
def getNumArgs(obj):
"""Return the number of "normal" arguments a callable object takes."""
sig = inspect.signature(obj)
return sum(1 for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_ONLY or
p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD)
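# Quick sanity check (illustrative): *args, keyword-only and **kwargs
# parameters are not counted, only "normal" positional ones.
def _demo(a, b, *args, c=0, **kwargs):
    pass
assert getNumArgs(_demo) == 2  # only a and b qualify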
|
c2e9edef0b2d8c18a0f9e2af90a6a1573705d590
| 706,129 |
def read_text(file, num=False):
""" Read from txt [file].
If [num], then data is numerical data and will need to convert each
string to an int.
"""
with open(file,'r') as f:
data = f.read().splitlines()
if num:
data = [int(i) for i in data]
return data
|
f9b61d254b1c2188ae6be3b9260f94f0657bcd3a
| 706,130 |
import operator
import re
def output_onto(conll_tokens, markstart_dict, markend_dict, file_name):
"""
Outputs analysis results in OntoNotes .coref XML format
:param conll_tokens: List of all processed ParsedToken objects in the document
:param markstart_dict: Dictionary from markable starting token ids to Markable objects
    :param markend_dict: Dictionary from markable ending token ids to Markable objects
    :param file_name: Document name written to the DOCNO attribute
    :return: serialized XML
"""
output_string = '<DOC DOCNO="' + file_name + '">\n<TEXT PARTNO="000">\n'
for out_tok in conll_tokens:
if int(out_tok.id) in markstart_dict:
for out_mark in sorted(markstart_dict[int(out_tok.id)], key=operator.attrgetter('end'), reverse=True):
output_string += '<COREF ID="' + str(out_mark.group) + '" ENTITY="' + out_mark.entity + '" INFSTAT="' + out_mark.infstat
if not out_mark.antecedent == "none":
output_string += '" TYPE="' + out_mark.coref_type
output_string += '">'
if int(out_tok.id) > 0:
output_string += re.sub("&","&",out_tok.text) if ";" not in out_tok.text else out_tok.text
if int(out_tok.id) in markend_dict:
for out_mark in markend_dict[int(out_tok.id)]:
output_string += "</COREF>"
if int(out_tok.id) > 0:
output_string += ' '
return output_string + "\n</TEXT>\n</DOC>\n"
|
f1a917e85735e9581326e60e3add94176e4f84cc
| 706,131 |
import logging
def create_provider_router(neutron_client, project_id):
"""Create the provider router.
:param neutron_client: Authenticated neutronclient
:type neutron_client: neutronclient.Client object
:param project_id: Project ID
:type project_id: string
:returns: Router object
:rtype: dict
"""
routers = neutron_client.list_routers(name='provider-router')
if len(routers['routers']) == 0:
logging.info('Creating provider router for external network access')
router_info = {
'router': {
'name': 'provider-router',
'tenant_id': project_id
}
}
router = neutron_client.create_router(router_info)['router']
logging.info('New router created: %s', (router['id']))
else:
logging.warning('Router provider-router already exists.')
router = routers['routers'][0]
return router
|
c9eb1de728d141d73c9f7b169df87c01829892f6
| 706,132 |
from typing import List
import shlex
def split(string: str) -> List[str]:
"""
Split string (which represents a command) into a list.
This allows us to just copy/paste command prefixes without having to define a full list.
"""
return shlex.split(string)
|
360fceeba7d6280e27068f61d2420cfd9fbfbcc2
| 706,133 |
def top1_accuracy(pred, y):
"""Main evaluation metric."""
return sum(pred.argmax(axis=1) == y) / float(len(y))
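# Minimal example (assumes numpy inputs, matching the argmax usage above):
import numpy as np
_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
_y = np.array([0, 1, 0])
assert top1_accuracy(_pred, _y) == 1.0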
|
d011b432c7c04331ff09d16ba8151c8c4f056ead
| 706,134 |
def standardize_ants_data(ants_data, subject_ID_col):
""" Takes df from ANTs output and stadardizes column names for both left and right hemi
"""
ants_useful_cols = ['Structure Name']
ants_to_std_naming_dict = {}
ants_to_std_naming_dict['Structure Name'] = subject_ID_col #'SubjID'
for roi in ants_data.columns:
prefix = None
name_split = roi.split(' ')
if name_split[0] == 'left':
prefix = 'L'
if name_split[0] == 'right':
prefix = 'R'
if prefix is not None:
ants_useful_cols.append(roi)
std_name = prefix + '_' + ''.join(name_split[1:])
ants_to_std_naming_dict[roi] = std_name
ants_data_std = ants_data[ants_useful_cols].copy()
ants_data_std = ants_data_std.rename(columns=ants_to_std_naming_dict)
# Splitting SubjID column to ignore site name
    ants_data_std[subject_ID_col] = ants_data_std[subject_ID_col].str.rsplit('_', n=1).str[-1]
return ants_data_std
|
0f5216fd75244b0b9b60fdcdf05d63bfd02a2ed9
| 706,135 |
def removeElement_2(nums, val):
    """
    Using one loop and two pointers
    Don't preserve order
    """
    # Overwrite each occurrence of val with the last element of the
    # unprocessed region, shrinking the effective length as we go.
    i = 0
    j = len(nums)
    while i < j:
        if nums[i] == val:
            nums[i] = nums[j - 1]
            j -= 1
        else:
            i += 1
    return j
|
e1a836514a09fc925a49b144880960b057dfff80
| 706,136 |
def worker_id():
"""Return a predefined worker ID.
Returns:
int: The static work id
"""
return 123
|
8c8e9c570a2355a15fd9a4d1d03d0159a33ffba0
| 706,137 |
import functools
import time
def time_profile(func):
"""Time Profiled for optimisation
Notes:
* Do not use this in production
"""
@functools.wraps(func)
def profile(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print(f"{func.__name__} : {time.time() - start}")
return result
return profile
|
a4dde1d66f5987b4be1e9179da1570c252540363
| 706,138 |
import re
def remove_useless_lines(text):
"""Removes lines that don't contain a word nor a number.
Args:
text (string): markdown text that is going to be processed.
Returns:
string: text once it is processed.
"""
# Useless lines
useless_line_regex = re.compile(r'^[^\w\n]*$', re.MULTILINE | re.UNICODE)
processed_text = useless_line_regex.sub(r'', text)
return processed_text
|
fd33cdb243b6887d11846736f922bb4e1332d549
| 706,140 |
import string
def remove_punctuation(list_of_string, item_to_keep=""):
"""
Remove punctuation from a list of strings.
Parameters
----------
- list_of_string : a dataframe column or variable containing the text stored as a list of string sentences
- item_to_keep : a string of punctuation signs you want to keep in text (e.g., '!?.,:;')
"""
# Update string of punctuation signs
if len(item_to_keep) > 0:
punctuation_list = "".join(
c for c in string.punctuation if c not in item_to_keep
)
else:
punctuation_list = string.punctuation
# Remove punctuation from each sentence
transtable = str.maketrans("", "", punctuation_list)
return [sent.translate(transtable) for sent in list_of_string]
|
cb9190bc160f8e725479b531afab383c6857ceac
| 706,141 |
def get_search_keywords(testcase):
"""Get search keywords for a testcase."""
crash_state_lines = testcase.crash_state.splitlines()
# Use top 2 frames for searching.
return crash_state_lines[:2]
|
15c1611aeff33f9d8bba843f076b31abfb4023ba
| 706,142 |
def make_protein_index(proteins):
"""Indexes proteins
"""
prot_index = {}
skip = set(['sp', 'tr', 'gi', 'ref', ''])
for i, p in enumerate(proteins):
accs = p.accession.split('|')
for acc in accs:
if acc in skip:
continue
prot_index[acc] = i
return prot_index
|
be54ca3a123fe13efbb8c694187dd34d944fd654
| 706,143 |
def python(cc):
"""Format the character for a Python string."""
codepoint = ord(cc)
    if 0x20 <= codepoint <= 0x7e:  # printable ASCII (excludes DEL)
        return cc
if codepoint > 0xFFFF:
return "\\U%08x" % codepoint
return "\\u%04x" % codepoint
|
b0c2042c653043c0831a35ffc13d73850e29af2f
| 706,144 |
def reshape_signal_batch(signal):
"""Convert the signal into a standard batch shape for use with cochleagram.py
functions. The first dimension is the batch dimension.
Args:
signal (array): The sound signal (waveform) in the time domain. Should be
either a flattened array with shape (n_samples,), a row vector with shape
(1, n_samples), a column vector with shape (n_samples, 1), or a 2D
matrix of the form [batch, waveform].
Returns:
array:
**out_signal**: If the input `signal` has a valid shape, returns a
2D version of the signal with the first dimension as the batch
dimension.
Raises:
ValueError: Raises an error of the input `signal` has invalid shape.
"""
if signal.ndim == 1: # signal is a flattened array
out_signal = signal.reshape((1, -1))
elif signal.ndim == 2: # signal is a row or column vector
if signal.shape[0] == 1:
out_signal = signal
elif signal.shape[1] == 1:
out_signal = signal.reshape((1, -1))
else: # first dim is batch dim
out_signal = signal
else:
raise ValueError('signal should be flat array, row or column vector, or a 2D matrix with dimensions [batch, waveform]; found %s' % signal.ndim)
return out_signal
|
344ce1a9a695e99fa470a5d849afb40bc381c9df
| 706,145 |
def edges_are_same(a, b):
"""
Function to check if two tuple elements (src, tgt, val) correspond
to the same directed edge (src, tgt).
Args:
        tuple_elements : a = (src, tgt, val) and b = (src, tgt, val)
    Returns:
        True or False
    """
    return a[0:2] == b[0:2]
|
04c4d414402a57cafa0028d0ecd140bedd2539d7
| 706,146 |
def map_field_name_to_label(form):
"""Takes a form and creates label to field name map.
:param django.forms.Form form: Instance of ``django.forms.Form``.
:return dict:
"""
return dict([(field_name, field.label)
for (field_name, field)
in form.base_fields.items()])
|
dfc2779f498fb479553602a72d9520d398746302
| 706,147 |
import time
def compute(n=26):
""" Computes 2 to the power of n and returns elapsed time"""
start = time.time()
res = 0
for i in range(2**n):
res += 1
end = time.time()
dt = end - start
print(f'Result {res} in {dt} seconds!')
return dt
|
d816c587302830f0acd20a59905c8634fcf20b49
| 706,148 |
def _create_table():
"""helper for crc calculation"""
table = []
for i in range(256):
k = i
for _ in range(8):
if k & 1:
k = (k >> 1) ^ 0xEDB88320
else:
k >>= 1
table.append(k)
return table
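# Sketch of the intended use (assumption): the table drives a standard
# reflected CRC-32 with polynomial 0xEDB88320, matching zlib.crc32.
def _crc32(data, table=_create_table()):
    crc = 0xFFFFFFFF
    for byte in data:
        crc = (crc >> 8) ^ table[(crc ^ byte) & 0xFF]
    return crc ^ 0xFFFFFFFF

import zlib
assert _crc32(b"hello") == zlib.crc32(b"hello")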
|
830317e62dcfb7bca63f1186b46a2882e0bb399f
| 706,149 |
import re
def parse_path_params(end_point_path):
"""Parse path parameters."""
numeric_item_types = ['Lnn', 'Zone', 'Port', 'Lin']
params = []
for partial_path in end_point_path.split('/'):
if (not partial_path or partial_path[0] != '<' or
partial_path[-1] != '>'):
continue
# remove all non alphanumeric characters
param_name = re.sub('[^0-9a-zA-Z]+', '', partial_path.title())
if param_name in numeric_item_types:
param_type = 'integer'
else:
param_type = 'string'
params.append((param_name, param_type))
return params
|
895c3b3663c33a6883ba34d7bbfb20de1491910d
| 706,150 |
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
}
|
5ec6b21b09372e71e6dcf8c60f418bcbc4beee64
| 706,151 |
import glob
import os
def get_terminal_map():
"""Get a map of device-id -> path as a dict.
Used by Process.terminal()
"""
ret = {}
ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
for name in ls:
assert name not in ret, name
try:
ret[os.stat(name).st_rdev] = name
except FileNotFoundError:
pass
return ret
|
50a4f56e3e2db87a620ab97f485b776c6ac35b6c
| 706,152 |
def pow(x, n):
""" pow(x, n)
Power function.
"""
return x**n
|
09d62a68607bf0dab8b380a0c3ee58c6ed4497d6
| 706,153 |
def dot_product(u, v):
"""Computes dot product of two vectors u and v, each represented as a tuple
or list of coordinates. Assume the two vectors are the same length."""
output = 0
for i in range(len(u)):
output += (u[i]*v[i])
return output
|
6362776bef32870d3b380aecbb2037483e049092
| 706,154 |
def parse_variable_char(packed):
""" Map a 6-bit packed char to ASCII """
packed_char = packed
if packed_char == 0:
return ""
if 1 <= packed_char <= 10:
return chr(ord('0') - 1 + packed_char)
elif 11 <= packed_char <= 36:
return chr(ord('A') - 11 + packed_char)
elif 37 <= packed_char <= 62:
return chr(ord('a') - 37 + packed_char)
else:
return "_"
|
e4ff95bca48ae97a22c20dda4fd82e082c32be27
| 706,155 |
def next_page(context):
"""
Get the next page for signup or login.
The query string takes priority over the template variable and the default
is an empty string.
"""
if "next" in context.request.GET:
return context.request.GET["next"]
if "next" in context.request.POST:
return context.request.POST["next"]
if "next" in context:
return context["next"]
return ""
|
6abc1c8ef260366e53f335a27ee42f0356c91b63
| 706,156 |
import numpy
def make_train_test_sets(input_matrix, label_matrix, train_per_class):
"""Return ((training_inputs, training_labels), (testing_inputs, testing_labels)).
Args:
input_matrix: attributes matrix. Each row is sample, each column is attribute.
label_matrix: labels matrix. Each row is sample, each column is label.
train_per_class: Number of samples for each class in training set.
"""
training_inputs = []
training_labels = []
testing_inputs = []
testing_labels = []
label_counts = {}
# Add each row to training or testing set depending on count of labels
for input_, label in zip(input_matrix, label_matrix):
key = tuple(label)
try:
count = label_counts[key]
except KeyError:
# First time seeing label, count is 0
count = 0
if count < train_per_class:
# Still need more training samples for this label
training_inputs.append(input_)
training_labels.append(label)
else:
# We have enough training samples for this label,
# add to testing set instead
testing_inputs.append(input_)
testing_labels.append(label)
label_counts[key] = count + 1
if testing_inputs == []:
raise ValueError('train_per_class too high, no testing set')
return ((numpy.array(training_inputs), numpy.array(training_labels)),
(numpy.array(testing_inputs), numpy.array(testing_labels)))
|
bd71f48ed9405a89dfa42b3cb6cfe45b064a6b4d
| 706,157 |
def pi_float():
"""native float"""
lasts, t, s, n, na, d, da = 0, 3.0, 3, 1, 0, 0, 24
while s != lasts:
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
return s
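# Sanity check: the series converges to pi at float precision.
import math
assert abs(pi_float() - math.pi) < 1e-12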
|
8a6a6a5942ddd61ecdf65b782bb2fc0f0519ddb5
| 706,158 |
from typing import Counter
def top_diffs(spect: list, num_acids: int) -> list:
"""Finds at least num_acids top differences in [57, 200]
Accepts ties
:param spect: a cyclic spectrum to find differences in
:type spect: list (of ints)
    :param num_acids: minimum number of acid masses to keep
    :type num_acids: int
    :returns: the top difference masses in [57, 200]
    :rtype: list (of ints)
"""
# must be sorted & start with 0
spect.sort()
if spect[0] != 0:
spect.insert(0, 0)
diffs = [spect[i] - spect[j] for i in range(1, len(spect))
for j in range(i - 1, -1, -1)]
acids = []
last_count = 0
for mass, count in Counter(diffs).most_common():
# leave if over min AND not tying min
if len(acids) >= num_acids and count < last_count:
break
# restricted weight for amino acid masses
if 57 <= mass <= 200:
acids.append(mass)
last_count = count
return acids
|
1ca2b08f6ecbf69b1ab2189b1cbbff9b4e1c2e8d
| 706,160 |
def add_placeholders(components):
"""Add placeholders for missing DATA/INSTANCE components"""
headers = [s[:2] for s in components]
for prefix in ("CD", "CR"):
if prefix not in headers:
components.append(prefix + ("C" * 11))
return components
|
303f1590042acc60aa753e5e317417de01fafafc
| 706,161 |
import hashlib
def md5(s, raw_output=False):
"""Calculates the md5 hash of a given string"""
res = hashlib.md5(s.encode())
if raw_output:
return res.digest()
return res.hexdigest()
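# Known MD5 test vector:
assert md5("abc") == "900150983cd24fb0d6963f7d28e17f72"
assert len(md5("abc", raw_output=True)) == 16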
|
238c2a6c6b06a046de86e514698c7ef5622f770b
| 706,162 |
def round_to_thirty(str_time):
"""STR_TIME is a time in the format HHMM. This function rounds down to the nearest half hour."""
minutes = int(str_time[2:])
if minutes//30 == 1:
rounded = "30"
else:
rounded = "00"
return str_time[0:2] + rounded
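# Illustrative: both "0912" and "0945" round down within the 09:00 hour.
assert round_to_thirty("0912") == "0900"
assert round_to_thirty("0945") == "0930"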
|
37e8473dbb6e91fc47a03491c421967db231d4d0
| 706,163 |
import os
import shutil
def mkdir(path, reset=False):
"""Checks if directory exists and if not, create one.
Parameters
----------
reset: erase the content of the directory if exists
Returns
-------
the path
"""
if reset and os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except FileExistsError:
pass
return path
|
64a868231cd3bd7199eef2ad19b2b7296e0c32fe
| 706,164 |
def remove_uoms(words):
"""
Remove uoms in the form of e.g. 1000m 1543m3
Parameters
----------
words: list of words to process
Returns
-------
A list of words where possible uom have been removed
"""
    returnWords = []
    for word in words:
        word = word.replace('.', '', 1).replace(',', '', 1)
        # skip words that look like a number plus a 1-char uom, e.g. "1543m"
        if word[:-1].isnumeric() or word[:-1].isdecimal():
            continue
        # skip words that look like a number plus a 2-char uom, e.g. "1543m3"
        if word[:-2].isnumeric() or word[:-2].isdecimal():
            continue
        returnWords.append(word)
    return returnWords
|
cdb2caf274a58b61c57ebe4fba167ec6275ddf6f
| 706,165 |
def format_value(v):
"""
Formats a value to be included in a string.
@param v a string
@return a string
"""
return ("'{0}'".format(v.replace("'", "\\'"))
if isinstance(v, str) else "{0}".format(v))
|
8b8d5452ecf938b4e9e9956577f1a3f1102e49bc
| 706,166 |
def check_invalid(string,*invalids,defaults=True):
"""Checks if input string matches an invalid value"""
# Checks string against inputted invalid values
    if string in invalids:
        return True
    # Checks string against default invalid values, if defaults=True
    if defaults:
        default_invalids = ['INC','inc','incomplete','NaN','nan','N/A','n/a','missing']
        if string in default_invalids:
            return True
    # For valid strings
    return False
|
6e9e20beebe8e0b0baed680219fd93453d7f4ce3
| 706,167 |
def get_more_spec_pos(tokens):
"""Return frequencies for more specific POS"""
# adverbs and preps, particles
adverbs = [t for t in tokens if t.full_pos == 'ADV']
apprart = [t for t in tokens if t.full_pos == 'APPRART']
postpos = [t for t in tokens if t.full_pos == 'APPO']
circum_pos = [t for t in tokens if t.full_pos == 'APZR']
compare_conj = [t for t in tokens if t.full_pos == 'KOKOM']
# foreign words, interjections
fremds = [t for t in tokens if t.full_pos == 'FM']
interj = [t for t in tokens if t.full_pos == 'ITJ']
# proper names and adjectives
prop_name = [t for t in tokens if t.full_pos == 'NE']
adja = [t for t in tokens if t.full_pos.startswith('ADJA')]
    adjd = [t for t in tokens if t.full_pos.startswith('ADJD')]
# pronouns
dem_pro_s = [t for t in tokens if t.full_pos == 'PDS']
dem_pro_a = [t for t in tokens if t.full_pos == 'PDAT']
ind_pro_s = [t for t in tokens if t.full_pos == 'PIS']
ind_pro_a = [t for t in tokens if t.full_pos in ['PIAT','PIDAT']]
pers_pron = [t for t in tokens if t.full_pos == 'PPER']
poss_s = [t for t in tokens if t.full_pos == 'PPOSS']
poss_a = [t for t in tokens if t.full_pos == 'PPOSAT']
refl_pron = [t for t in tokens if t.full_pos == 'PRF']
    inter_pron = [t for t in tokens if t.full_pos in ['PWS','PWAT','PWAV']]
all_prons = dem_pro_s+dem_pro_a+ind_pro_s+ind_pro_a+poss_s+poss_a+refl_pron+inter_pron
# compartives, punctuation
comp = [t for t in tokens if t.full_pos == 'TRUNC']
sent_int_interpunct = [t for t in tokens if t.full_pos == '$(']
# pronom adverbs and others
pro_adv = [t for t in tokens if t.full_pos == 'PROAV' and t.function == 'pp']
part_kvz = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'avz']
inf_with_zu = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'VVIZU']
for t in poss_s+poss_a:
t.pos_color.append('Poss pronouns')
for t in refl_pron:
t.pos_color.append('Refl pronouns')
return (len(adverbs), len(apprart), len(postpos), len(circum_pos), len(fremds), len(interj), \
len(prop_name), len(adja), len(adjd),
len(dem_pro_s), len(dem_pro_a), len(dem_pro_s)+len(dem_pro_a), len(ind_pro_s), len(ind_pro_a), \
len(ind_pro_s)+len(ind_pro_a),
len(pers_pron), len(poss_s), len(poss_a), len(poss_s)+len(poss_a), len(refl_pron), \
len(inter_pron), len(comp),
len(sent_int_interpunct), len(pro_adv), len(part_kvz), len(compare_conj), \
len(inf_with_zu), len(all_prons))
|
5ea2ae19d61c84ca8750999aa14a14dd426fe6f7
| 706,168 |
from pathlib import Path
def get_notebook_path(same_config_path, same_config_file_contents) -> str:
"""Returns absolute value of the pipeline path relative to current file execution"""
return str(Path.joinpath(Path(same_config_path).parent, same_config_file_contents["notebook"]["path"]))
|
4b9f8952bdb7c2308fdfa290ec108d432b6b6a0b
| 706,169 |
def get_registry_image_tag(app_name: str, image_tag: str, registry: dict) -> str:
"""Returns the image name for a given organization, app and tag"""
return f"{registry['organization']}/{app_name}:{image_tag}"
|
16c71f99ff3a3c2514c24cb417b93f3b88f7cf42
| 706,170 |
import os
import shutil
def save_wind_generated_waves_to_subdirectory(args):
""" Copy the wave height and wave period to the outputs/ directory.
    Inputs:
        args['intermediate_directory']: base output directory
        args['subdirectory']: subdirectory the data is copied into
        args['wave_heights']: list of uris to wave height data
        args['wave_periods']: list of uris to wave period data
    Outputs:
        data_uri: dictionary containing the uri where the data is saved
            (currently returned empty)
"""
intermediate_directory = \
os.path.join(args['intermediate_directory'], args['subdirectory'])
wave_height_list = args['wave_heights']
wave_period_list = args['wave_periods']
data_uri = {}
for wave_height_uri in wave_height_list:
shutil.copy(wave_height_uri, intermediate_directory)
for wave_period_uri in wave_period_list:
shutil.copy(wave_period_uri, intermediate_directory)
return data_uri
|
5b203f5237ebd9ac3fbddcecc5b9c609677eb5ae
| 706,171 |
def get_output_names(hf):
"""
get_output_names(hf)
Returns a list of the output variables names in the HDF5 file.
Args:
hf: An open HDF5 filehandle or a string containing the HDF5
filename to use.
Returns:
A sorted list of the output variable names in the HDF5 file.
"""
return sorted(map(str, hf['/output/data'].keys()))
|
6607197166c9a63d834398b188e996a811b081ce
| 706,172 |
def create_hostclass_snapshot_dict(snapshots):
"""
Create a dictionary of hostclass name to a list of snapshots for that hostclass
:param list[Snapshot] snapshots:
:return dict[str, list[Snapshot]]:
"""
snapshot_hostclass_dict = {}
for snap in snapshots:
# build a dict of hostclass+environment to a list of snapshots
# use this dict for the --keep-num option to know how many snapshots are there for each hostclass
if snap.tags and snap.tags.get('hostclass') and snap.tags.get('env'):
key_name = snap.tags.get('hostclass') + '_' + snap.tags.get('env')
hostclass_snapshots = snapshot_hostclass_dict.setdefault(key_name, [])
hostclass_snapshots.append(snap)
return snapshot_hostclass_dict
|
dd568eaeb76fee96a876b5a57d963cd2fc8f870e
| 706,173 |
import os
import yaml
def load_parameters(directory_name):
"""
Loads the .yml file parameters to a dictionary.
"""
    root = os.getcwd()
    parameter_file_name = os.path.join(root, directory_name)
    with open(parameter_file_name, 'r') as parameter_file:
        parameters = yaml.load(parameter_file, Loader=yaml.FullLoader)
    return parameters
|
793efa00af16851b78fd0f4277b24d03db76fe2c
| 706,174 |
from typing import List
import random
import math
def rsafactor(d: int, e: int, N: int) -> List[int]:
"""
This function returns the factors of N, where p*q=N
Return: [p, q]
We call N the RSA modulus, e the encryption exponent, and d the decryption exponent.
The pair (N, e) is the public key. As its name suggests, it is public and is used to
encrypt messages.
The pair (N, d) is the secret key or private key and is known only to the recipient
of encrypted messages.
>>> rsafactor(3, 16971, 25777)
[149, 173]
>>> rsafactor(7331, 11, 27233)
[113, 241]
>>> rsafactor(4021, 13, 17711)
[89, 199]
"""
k = d * e - 1
p = 0
q = 0
while p == 0:
g = random.randint(2, N - 1)
t = k
while True:
if t % 2 == 0:
t = t // 2
x = (g ** t) % N
y = math.gcd(x - 1, N)
if x > 1 and y > 1:
p = y
q = N // y
break # find the correct factors
else:
break # t is not divisible by 2, break and choose another g
return sorted([p, q])
|
21e655bc3f5b098da0d437a305baf89c70cebd56
| 706,175 |
import base64
def getFile(path):
"""
指定一个文件的路径,放回该文件的信息。
:param path: 文件路径
:return: PHP-> base64 code
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
$path = '%s';
$hanlder = fopen($path, 'rb');
$res = fread($hanlder, filesize($path));
fclose($hanlder);
echo $res;
"""% path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
|
e44e3f90e5febee54d2f5de48e35f0b83acf9842
| 706,176 |
def get_all_child_wmes(self):
""" Returns a list of (attr, val) tuples representing all wmes rooted at this identifier
val will either be an Identifier or a string, depending on its type """
wmes = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if wme.IsIdentifier():
wmes.append( (wme.GetAttribute(), wme.ConvertToIdentifier()) )
else:
wmes.append( (wme.GetAttribute(), wme.GetValueAsString()) )
return wmes
|
fb66aef96ca5fd5a61a34a86052ab9014d5db8a4
| 706,177 |
import os
def reduce_scan(row, params, **kwargs):
"""
Reduce scan-mode grism data
.. warning::
This function is not yet implemented. It will raise an exception.
Parameters
----------
row : abscal.common.exposure_data_table.AbscalDataTable
Single-row table of the exposure to be extracted.
params : dict
Dictionary of parameters to use for the reduction
arg_list : namespace
Namespace of command-line arguments.
Returns
-------
row : abscal.common.exposure_data_table.AbscalDataTable
Updated single-row table of the exposure
"""
raise NotImplementedError("Scan mode is not yet available.")
default_values = get_defaults('abscal.common.args')
base_defaults = default_values | get_defaults(kwargs.get('module_name', __name__))
verbose = arg_list.verbose
show_plots = arg_list.plots
bkg_flat_order = arg_list.bkg_flat_order
file = os.path.join(row["path"], row["filename"])
with fits.open(file) as inf:
image = inf['SCI'].data
filter = row['filter']
xsize, ysize = image.shape[1], image.shape[0]
err = inf['ERR'].data
time = inf['TIME'].data
dq = inf['DQ'].data
return input_table
|
9a9e19f8a5a48d62181208562a4ecff526b41638
| 706,178 |
import os
import sys
import importlib
def module(spec):
""" Returns the module at :spec:
@see Issue #2
:param spec: to load.
:type spec: str
"""
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
return importlib.import_module(spec)
|
33928f92dddeee5fa8822e2a592d9a957867b5d9
| 706,179 |
def has_three_or_more_vowels(string):
"""Check if string has three or more vowels."""
return sum(string.count(vowel) for vowel in 'aeiou') >= 3
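# Illustrative: repeats count too, so "aaa" qualifies.
assert has_three_or_more_vowels("aaa") is True
assert has_three_or_more_vowels("sky") is False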
|
8b0b683ebe51b18bdc5d6f200b41794a4cb3a510
| 706,180 |
import os
def get_map_folderpath(detectionID):
"""
Make sure map directory exists and return folder location for maps to be
saved to.
"""
    # Anchor everything to this module's directory (the original mixed a
    # cwd-relative existence check with a homedir-joined return value).
    homedir = os.path.dirname(os.path.abspath(__file__))
    map_dir = os.path.join(homedir, 'map')
    if not os.path.exists(map_dir):
        os.makedirs(map_dir)
    detection_folder = os.path.join(map_dir, str(detectionID))
    if not os.path.exists(detection_folder):
        os.makedirs(detection_folder)
    return detection_folder
|
3fd3f0bae5d8152b9b46f4f99dc79e14b5318e76
| 706,181 |
import pathlib
def release_kind():
"""
Determine which release to make based on the files in the
changelog.
"""
# use min here as 'major' < 'minor' < 'patch'
return min(
'major' if 'breaking' in file.name else
'minor' if 'change' in file.name else
'patch'
for file in pathlib.Path('changelog.d').iterdir()
)
|
115f75c1e0f1e8b02916db518e3983462d9bc19c
| 706,182 |
import re
def edit_text_file(filepath: str, regex_search_string: str, replace_string: str):
"""
This function is used to replace text inside a file.
:param filepath: the path where the file is located.
:param regex_search_string: string used in the regular expression to find what has to be replaced.
:param replace_string: the string which will replace all matches found using regex_search_string.
:return: None
:raise RuntimeError: if regex_search_string doesn't find any match.
"""
# open the file and read the content
with open(filepath, "r") as f:
text_file = f.read()
    # find all matches (materialized so an empty result can be detected;
    # re.finditer never returns None)
    matches = list(re.finditer(regex_search_string, text_file))
    if not matches:
        raise RuntimeError("No match has been found using the given regex_search_string!")
# replace all matches with replace_string
for match in matches:
text_file = text_file.replace(match.group(0), replace_string)
# overwrite the file
with open(filepath, "w") as f:
f.write(text_file)
return None
|
e0f5945a96f755a9c289262c3d19552c0e1b40fd
| 706,183 |
def find_sums(sheet):
"""
    Tallies total assets (positive amounts) and total liabilities (negative amounts) across all rows.
RETURNS:
Tuple of assets and liabilities.
"""
pos = 0
neg = 0
for row in sheet:
if row[-1] > 0:
pos += row[-1]
else:
neg += row[-1]
return pos, neg
|
351e13d6915288268a56d8292c470fe354fa9842
| 706,184 |
def read_links(title):
"""
Reads the links from a file in directory link_data.
Assumes the file exists, as well as the directory link_data
Args:
title: (Str) The title of the current wiki file to read
Returns a list of all the links in the wiki article with the name title
"""
with open(f"link_data/{title}", "r") as f:
read_data = f.read()
return read_data.split("\n")[:-1]
|
50f128bcf4cd36bc783bc848ab2e6b6280973ea3
| 706,185 |
import numpy
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
"""Return the smallest string representation of a number.
This method is only concerned with the maximum number of significant
digits.
Two alternatives:
1) modified scientific notation (without the trailing + and zero in
the exponent)
2) float notation
:returns: string representation of a number of function evaluations
or ERT.
"""
#Compared to writeFEvals2?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
size1 = len(repr1)
tmp = repr1.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
repr1 = 'e'.join(tmp)
#len(repr1) <= maxsymbols is not always the case but should be most usual
tmp = '%.0f' % fevals
remainingsymbols = max(maxsymbols - len(tmp), 0)
repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
tmp = repr2.split('.', 1)
if len(tmp) > 1:
tmp[-1] = tmp[-1].rstrip('0')
repr2 = '.'.join(tmp)
repr2 = repr2.rstrip('.')
#set_trace()
if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
return repr1
#tmp1 = '%4.0f' % bestalgdata[-1]
#tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
#if len(tmp2) > 1:
# tmp2[-1] = tmp2[-1].lstrip('+0')
# tmp2 = 'e'.join(tmp2)
# tmp = tmp1
# if len(tmp1) >= len(tmp2):
# tmp = tmp2
# curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
return repr2
|
a5434c5f6e845473f2187b969e4fa42538a95633
| 706,186 |
def findConstell(cc):
"""
input is one character (from rinex satellite line)
output is integer added to the satellite number
0 for GPS, 100 for Glonass, 200 for Galileo, 300 for everything else?
author: kristine larson, GFZ, April 2017
"""
if (cc == 'G' or cc == ' '):
out = 0
elif (cc == 'R'): # glonass
out = 100
elif (cc == 'E'): # galileo
out = 200
else:
out = 300
return out
|
d7a85fc5f7324acdb5277fd6db458523cd4ad4b8
| 706,188 |
def get_hyperparams(data, ind):
"""
Gets the hyperparameters for hyperparameter settings index ind
data : dict
The Python data dictionary generated from running main.py
ind : int
Gets the returns of the agent trained with this hyperparameter
settings index
Returns
-------
dict
The dictionary of hyperparameters
"""
return data["experiment_data"][ind]["agent_hyperparams"]
|
3734f4cf00564a1aa7c852091d366e6e42b6d55b
| 706,189 |
def blend0(d=0.0, u=1.0, s=1.0):
"""
blending function trapezoid
d = delta x = xabs - xdr
u = uncertainty radius of xabs estimate error
s = tuning scale factor
returns blend
"""
d = float(abs(d))
u = float(abs(u))
s = float(abs(s))
v = d - u #offset by radius
if v >= s: #first so if s == 0 catches here so no divide by zero below
b = 0.0
elif v <= 0.0:
b = 1.0
else: # 0 < v < s
b = 1.0 - (v / s)
return b
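# Illustrative: full blend inside the uncertainty radius, zero beyond u + s,
# and a linear ramp in between.
assert blend0(d=0.5, u=1.0, s=1.0) == 1.0
assert blend0(d=1.5, u=1.0, s=1.0) == 0.5
assert blend0(d=2.5, u=1.0, s=1.0) == 0.0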
|
d501db66c34f28421c1517dcd3052fa7b2ee8643
| 706,190 |
def check_win(mat):
"""
Returns either:
False: Game not over.
True: Game won, 2048 is found in mat
"""
    # If won, a terminal state is needed for the RL agent.
    # `mat` is assumed to be the grid as a list of rows.
    return any(2048 in row for row in mat)
|
0824bc059cfa32b275c7b63f98d22e8a5b667e06
| 706,191 |
def mtl_to_json(mtl_text):
""" Convert Landsat MTL file to dictionary of metadata values """
mtl = {}
for line in mtl_text.split('\n'):
meta = line.replace('\"', "").strip().split('=')
if len(meta) > 1:
key = meta[0].strip()
item = meta[1].strip()
if key != "GROUP" and key != "END_GROUP":
mtl[key] = item
return mtl
|
310be04e9fbf756e9cf5ead60e53aae974d2ed50
| 706,192 |
def endian_swap(word):
"""Given any string, swap bits and return the result.
:rtype: str
"""
return "".join([word[i:i+2] for i in [6, 4, 2, 0]])
|
dfca46a012602150957a0830cf30cc6b6790df80
| 706,193 |
def str_to_col_grid_lists(s):
"""
Convert a string to selected columns and selected grid ranges.
Parameters:
s: (str) a string representing one solution.
For instance, *3**9 means 2 out of 5 dimensions are selected; the second and the last columns are selected,
and their corresponding grid ranges are 3 and 9. The function will return (1, 4) and (3, 9).
Return:
selected_cols (list): list of columns selected as indicated by the string.
selected_ranges (list): list of grid ranges selected as indicated by the string.
"""
selected_cols, selected_ranges = [], []
for i in range(len(s)):
if s[i] != "*":
selected_cols.append(i)
selected_ranges.append(int(s[i]))
return selected_cols, selected_ranges
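# The docstring example, as a check:
assert str_to_col_grid_lists("*3**9") == ([1, 4], [3, 9])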
|
4f5c67afa0dc97070b08223acbe6764010fd213a
| 706,194 |
def _join_type_and_checksum(type_list, checksum_list):
"""
Join checksum and their correlated type together to the following format:
"checksums": [{"type":"md5", "checksum":"abcdefg}, {"type":"sha256", "checksum":"abcd12345"}]
"""
checksums = [
{
"type": c_type,
"checksum": checksum,
}
for c_type, checksum in zip(type_list, checksum_list)
]
return checksums
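# Illustrative pairing (hypothetical values):
assert _join_type_and_checksum(["md5", "sha256"], ["abcdefg", "abcd12345"]) == [
    {"type": "md5", "checksum": "abcdefg"},
    {"type": "sha256", "checksum": "abcd12345"},
]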
|
7f09ee72c6f51ad87d75a9b5e74ad8ef4776323f
| 706,195 |
import argparse
def handle_kv_string(val):
"""This method is used as type field in --filter argument in ``buildtest buildspec find``.
This method returns a dict of key,value pair where input is in format
key1=val1,key2=val2,key3=val3
Args:
val (str): Input string in ``key1=value1,key2=value2`` format that is processed into a dictionary type
Returns:
dict: A dict mapping of key=value pairs
"""
kv_dict = {}
if "," in val:
args = val.split(",")
for kv in args:
if "=" not in kv:
raise argparse.ArgumentTypeError("Must specify k=v")
key, value = kv.split("=")[0], kv.split("=")[1]
kv_dict[key] = value
return kv_dict
if "=" not in val:
raise argparse.ArgumentTypeError("Must specify in key=value format")
key, value = val.split("=")[0], val.split("=")[1]
kv_dict[key] = value
return kv_dict
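# Illustrative (hypothetical keys): single and comma-separated forms both work.
assert handle_kv_string("type=script") == {"type": "script"}
assert handle_kv_string("type=script,tags=fail") == {"type": "script", "tags": "fail"}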
|
ccc51c26fe881660606c49a1b84a67a796f4083a
| 706,197 |
from typing import Dict
from typing import Any
import jwt  # PyJWT, assumed from the decode signature below
def decode_jwt(
jwt_string: str
) -> Dict[Any, Any]:
""" Decodes the given JWT string without performing any verification.
Args:
jwt_string (str): A string of the JWT to decode.
Returns:
dict: A dictionary of the body of the JWT.
"""
return jwt.decode( # type: ignore
jwt_string,
algorithms = ['ES256K'],
options={"verify_signature": False}
)
|
39b3e14a3eb63723b2a8df21d5252ea937b0a41b
| 706,198 |
import os
def get_file_without_path(file_name, with_extension=False):
"""
get the name of a file without its path
"""
base = os.path.basename(file_name)
if not with_extension:
base = os.path.splitext(base)[0]
return base
|
f6cf8c8003fe24a2b5ed265c3497bc866d201fb2
| 706,199 |
def with_key(output_key_matcher):
"""Check does it have a key."""
return output_key_matcher
|
5bcb64550ce202f66ac43325fe8876249b45c52d
| 706,200 |
import torch
from typing import List
def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int],
use_cls: bool, reduce_mean: bool = True) -> torch.Tensor:
"""
Extract embeddings from hidden attention state layers.
Parameters
----------
hidden_states
Attention hidden states in the transformer model.
layers
List of layers to use for the embedding.
use_cls
Whether to use the next sentence token (CLS) to extract the embeddings.
reduce_mean
Whether to take the mean of the output tensor.
Returns
-------
Tensor with embeddings.
"""
hs = [hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer] for layer in layers]
hs = torch.cat(hs, dim=1) # type: ignore
y = hs.mean(dim=1) if reduce_mean else hs # type: ignore
return y
|
f732e834f9c3437a4a7278aa6b9bfc54589b093b
| 706,201 |
def apparent_attenuation(og, fg):
"""Apparent attenuation
"""
return 100.0 * (float(og) - float(fg)) / float(og)
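# Example in gravity points (OG 50, FG 10): 80% apparent attenuation.
assert apparent_attenuation(50, 10) == 80.0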
|
e22ce07229baa4eacb7388280630d6097e21f364
| 706,203 |
def dropsRowsWithMatchClassAndDeptRemainderIsZero(df, Col, RemainderInt, classToShrink):
"""
Takes as input a dataframe, a column, a remainder integer, and a class within the column.
Returns the dataframe minus the rows that match the ClassToShrink in the Col and have a depth from the DEPT col with a remainder of zero.
"""
print("original lenght of dataframe = ", len(df))
df_new = df.drop(df[(df[Col] == classToShrink) & (df.index % 10 != 0)].index)
print("length of new dataframe after dropping rows = ", len(df_new))
print("number of rows dropped = ", len(df) - len(df_new))
print("length of 0 class is :", len(df_new[df_new[Col] == classToShrink]))
return df_new
|
f88ec5e8293d753defe0a6d31f083e52218011ba
| 706,204 |
def setlist(L):
""" list[alpha] -> set[alpha] """
# E : set[alpha]
E = set()
# e : alpha
for e in L:
E.add(e)
return E
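# Illustrative: duplicates collapse into the set.
assert setlist([1, 2, 2, 3]) == {1, 2, 3}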
|
7607d3d47ea5634773298afaea12d03759c0f1d4
| 706,205 |
def _p_value_color_format(pval):
"""Auxiliary function to set p-value color -- green or red."""
color = "green" if pval < 0.05 else "red"
return "color: %s" % color
|
ae58986dd586a1e6cd6b6281ff444f18175d1d32
| 706,207 |
import copy
def filter_parts(settings):
"""
Remove grouped components and glyphs that have been deleted or split.
"""
parts = []
temp = copy.copy(settings['glyphs'])
for glyph in settings['glyphs']:
name = glyph['class_name']
if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
parts.append(glyph)
temp.remove(glyph)
settings['glyphs'] = temp
# Remove from the training glyphs as well
temp2 = copy.copy(settings['training_glyphs'])
for glyph in settings['training_glyphs']:
name = glyph['class_name']
if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
temp2.remove(glyph)
settings['training_glyphs'] = temp2
return parts
|
f8d6a59eeeb314619fd4c332e2594dee3543ee9c
| 706,208 |
def getAccentedVocal(vocal, acc_type="g"):
"""
It returns given vocal with grave or acute accent
"""
vocals = {'a': {'g': u'\xe0', 'a': u'\xe1'},
'e': {'g': u'\xe8', 'a': u'\xe9'},
'i': {'g': u'\xec', 'a': u'\xed'},
'o': {'g': u'\xf2', 'a': u'\xf3'},
'u': {'g': u'\xf9', 'a': u'\xfa'}}
return vocals[vocal][acc_type]
|
cfec276dac32e6ff092eee4f1fc84b412c5c915c
| 706,209 |
def env_initialize(env, train_mode=True, brain_idx=0, idx=0, verbose=False):
""" Setup environment and return info """
# get the default brain
brain_name = env.brain_names[brain_idx]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=train_mode)[brain_name]
# examine the state space and action space
state = env_info.vector_observations[idx]
state_size = len(state)
action_size = brain.vector_action_space_size
if verbose:
# number of agents in the environment
print(f'Number of agents: {len(env_info.agents)}')
print(f'Number of actions: {action_size}')
print(f'States have length: {state_size}')
print(f'States look like: {state}')
return (brain, brain_name, state, action_size, state_size)
|
3c951a77009cca8c876c36965ec33781dd2c08dd
| 706,210 |
def _parse_tree_height(sent):
"""
Gets the height of the parse tree for a sentence.
"""
children = list(sent._.children)
if not children:
return 0
else:
return max(_parse_tree_height(child) for child in children) + 1
|
d6de5c1078701eeeb370c917478d93e7653d7f4f
| 706,211 |
def find_available_pacs(pacs, pac_to_unstuck=None, pac_to_super=None, pac_to_normal=None):
"""
Finds the available pacs that are not assigned
"""
available_pacs = pacs['mine']
if pac_to_unstuck is not None:
available_pacs = [x for x in available_pacs if x['id'] not in pac_to_unstuck.keys()]
if pac_to_super is not None:
available_pacs = [x for x in available_pacs if x['id'] not in pac_to_super.keys()]
if pac_to_normal is not None:
available_pacs = [x for x in available_pacs if x['id'] not in pac_to_normal.keys()]
return available_pacs
|
4b6674fd87db2127d5fffa781431ccc9a9ff775a
| 706,212 |
def create_indices(dims):
"""Create lists of indices"""
return [range(1,dim+1) for dim in dims]
|
1a83b59eb1ca2b24b9db3c9eec05db7335938cae
| 706,213 |
def indicator_selector(row, indicator, begin, end):
"""Return Tons of biomass loss."""
dasy = {}
if indicator == 4:
return row[2]['value']
for i in range(len(row)):
if row[i]['indicator_id'] == indicator and row[i]['year'] >= int(begin) and row[i]['year'] <= int(end):
dasy[str(row[i]['year'])] = row[i]['value']
return dasy
|
329411837633f4e28bea4b2b261b6f4149b92fb1
| 706,214 |
from datetime import datetime
def format_cell(cell, datetime_fmt=None):
"""Format a cell."""
if datetime_fmt and isinstance(cell, datetime):
return cell.strftime(datetime_fmt)
return cell
|
8d3fb41bb3d7d3f3b341482e2d050d32092118bf
| 706,216 |
def _default_geo_type_precision():
""" default digits after decimal for geo types """
return 4
|
eef082c8a8b38f4ede7bfb5d631b2679041b650c
| 706,217 |
def find_range_with_sum(values : list[int], target : int) -> tuple[int, int]:
"""Given a list of positive integers, find a range which sums to a target
value."""
    # Sliding window over the half-open index range [i, j); the loop also
    # checks sums that end at the last element.
    i = j = acc = 0
    while True:
        if acc == target:
            return i, j
        if acc < target:
            if j == len(values):
                break
            acc += values[j]
            j += 1
        else:
            acc -= values[i]
            i += 1
    return -1, -1
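# Illustrative: half-open ranges, including one ending at the last element.
assert find_range_with_sum([1, 2, 3, 4, 5, 6], 15) == (0, 5)  # 1+2+3+4+5
assert find_range_with_sum([1, 2, 3, 4, 5, 6], 21) == (0, 6)  # whole list
assert find_range_with_sum([1, 2, 3], 100) == (-1, -1)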
|
d54f185c98c03f985724a29471ecb1e301c14df5
| 706,218 |
def destagger(var, stagger_dim, meta=False):
"""Return the variable on the unstaggered grid.
This function destaggers the variable by taking the average of the
values located on either side of the grid box.
Args:
var (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A variable
on a staggered grid.
stagger_dim (:obj:`int`): The dimension index to destagger.
Negative values can be used to choose dimensions referenced
from the right hand side (-1 is the rightmost dimension).
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is False.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`:
The destaggered variable. If xarray is enabled and
the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
var_shape = var.shape
num_dims = var.ndim
stagger_dim_size = var_shape[stagger_dim]
# Dynamically building the range slices to create the appropriate
# number of ':'s in the array accessor lists.
# For example, for a 3D array, the calculation would be
# result = .5 * (var[:,:,0:stagger_dim_size-2]
# + var[:,:,1:stagger_dim_size-1])
# for stagger_dim=2. So, full slices would be used for dims 0 and 1, but
# dim 2 needs the special slice.
full_slice = slice(None)
slice1 = slice(0, stagger_dim_size - 1, 1)
slice2 = slice(1, stagger_dim_size, 1)
# default to full slices
dim_ranges_1 = [full_slice] * num_dims
dim_ranges_2 = [full_slice] * num_dims
# for the stagger dim, insert the appropriate slice range
dim_ranges_1[stagger_dim] = slice1
dim_ranges_2[stagger_dim] = slice2
result = .5*(var[tuple(dim_ranges_1)] + var[tuple(dim_ranges_2)])
return result
|
89bb08618fa8890001f72a43da06ee8b15b328be
| 706,219 |
from typing import List
from typing import Tuple
def calculateCentroid(
pointCloud : List[Tuple[float, float, float]]
) -> Tuple[float, float, float]:
"""Calculate centroid of point cloud.
Arguments
--------------------------------------------------------------------------
pointCloud (float 3-tuple list) -- list of xyz coordinates.
Returns
--------------------------------------------------------------------------
centroid (float 3-tuple) -- centroid of points in point cloud.
"""
numPoints = len(pointCloud)
x, y, z = [], [], []
for point in pointCloud:
x.append(point[0])
y.append(point[1])
z.append(point[2])
x, y, z = sum(x) / numPoints, sum(y) / numPoints, sum(z) / numPoints
return x, y, z
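# Illustrative: centroid of a right triangle in the z=0 plane.
assert calculateCentroid([(0.0, 0.0, 0.0), (3.0, 0.0, 0.0), (0.0, 3.0, 0.0)]) == (1.0, 1.0, 0.0)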
|
0e8d6d578a0a983fe1e68bff22c5cc613503ee76
| 706,220 |
def column_indexes(column_names, row_header):
"""項目位置の取得
Args:
column_names (str): column name
row_header (dict): row header info.
Returns:
[type]: [description]
"""
column_indexes = {}
for idx in column_names:
column_indexes[idx] = row_header.index(column_names[idx])
return column_indexes
|
4205e31e91cd64f833abd9ad87a02d91eebc8c61
| 706,221 |
def get_sql_query(table_name:str) -> str:
"""Fetch SQL query file for generation of dim or fact table(s)"""
    with open(f'./models/sql/{table_name}.sql') as f:
        return f.read()
|
fc3308eae51b7d10667a50a0f4ee4e295bfea8d0
| 706,222 |
def fmt_bytesize(num: float, suffix: str = "B") -> str:
"""Change a number of bytes in a human readable format.
Args:
num: number to format
suffix: (Default value = 'B')
Returns:
The value formatted in human readable format (e.g. KiB).
"""
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "{:3.1f} {}{}".format(num, unit, suffix)
num /= 1024.0
return "{:.1f} {}{}".format(num, "Yi", suffix)
|
09b36d229856004b6df108ab1ce4ef0a9c1e6289
| 706,223 |
def submit_search_query(query_string, query_limit, query_offset,
class_resource):
"""
Submit a search query request to the RETS API
"""
    search_result = class_resource.search(
        query=query_string, limit=query_limit, offset=query_offset)
return search_result
|
f8c30c86f7ff7c33fc96b26b1491ddaa48710fbc
| 706,224 |
import subprocess
import os
import re
def is_enabled():
"""
Check if `ufw` is enabled
:returns: True if ufw is enabled
"""
output = subprocess.check_output(['ufw', 'status'],
universal_newlines=True,
env={'LANG': 'en_US',
'PATH': os.environ['PATH']})
m = re.findall(r'^Status: active\n', output, re.M)
return len(m) >= 1
|
fa922cdb87e35e1fc7cf77c5eba6a7da651ea070
| 706,225 |
import torch
from typing import Tuple
def get_median_and_stdev(arr: torch.Tensor) -> Tuple[float, float]:
"""Returns the median and standard deviation from a tensor."""
return torch.median(arr).item(), torch.std(arr).item()
|
d8fca5a97f00d14beecaa4b508442bc7a3637f86
| 706,226 |
import functools
def return_arg_type(at_position):
"""
Wrap the return value with the result of `type(args[at_position])`
"""
def decorator(to_wrap):
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
result = to_wrap(*args, **kwargs)
ReturnType = type(args[at_position])
return ReturnType(result)
return wrapper
return decorator
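# Illustrative: the wrapper coerces the result back to the type of the
# argument at `at_position`.
@return_arg_type(0)
def _double_all(seq):
    return [x * 2 for x in seq]

assert _double_all((1, 2, 3)) == (2, 4, 6)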
|
30bf4e4a46b0b64b6cb5752286a13c0e6f7618df
| 706,227 |
def extend(s, var, val):
"""Copy dict s and extend it by setting var to val; return copy."""
try: # Python 3.5 and later
return eval('{**s, var: val}')
except SyntaxError: # Python 3.4
s2 = s.copy()
s2[var] = val
return s2
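# Illustrative: the original dict is left untouched.
s = {'a': 1}
assert extend(s, 'b', 2) == {'a': 1, 'b': 2}
assert s == {'a': 1}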
|
919e7102bf7f8766d9ddb9ea61a07ddd020d1bb8
| 706,228 |
def generate_arn(service, arn_suffix, region=None):
"""Returns a formatted arn for AWS.
Keyword arguments:
service -- the AWS service
arn_suffix -- the majority of the arn after the initial common data
region -- the region (can be None for region free arns)
"""
arn_value = "arn"
aws_value = "aws"
region_qualified = region if region else ""
return f"{arn_value}:{aws_value}:{service}:{region_qualified}:{arn_suffix}"
|
53dcf55c3fb15784770d1c2d62375d1e750469f8
| 706,229 |
def prod_list(lst):
"""returns the product of all numbers in a list"""
if lst:
res = 1
for num in lst:
res *= num
return res
else:
raise ValueError("List cannot be empty.")
|
8179e2906fb4b517d02972fd4647095d37caf6cd
| 706,230 |
from typing import Optional
import subprocess
import warnings
def get_current_git_hash(raise_on_error: bool = False) -> Optional[str]:
""" Return git hash of the latest commit
Parameters
----------
raise_on_error: bool, optional
If False (default), will return None, when it fails to obtain commit hash.
If True, will raise, when it fails to obtain commit hash.
Returns
-------
Short hash of the current HEAD or None.
"""
try:
git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').strip()
except subprocess.CalledProcessError:
if raise_on_error:
raise
warnings.warn('Probably not in a git repo.')
git_hash = None
return git_hash
|
7a17dc7e0574b7f3da91f009714226f855723427
| 706,231 |
def check_shift(start_time, end_time, final_minute, starting_minute, record):
"""
Função que verifica o turno da chamada e calcula o valor da ligação
:param start_time:
:param end_time:
:param final_minute:
:param starting_minute:
:return value:
"""
nem_start_time = start_time + (starting_minute / 60)
nem_end_time = end_time + (final_minute / 60)
call_time = (record['end'] - record['start']) // 60
if 6 < nem_start_time < 22:
if 6 < nem_end_time < 22:
            # So the call was completed entirely within the daytime period
value = 0.36 + call_time * 0.09
else:
            # So the call started in the daytime period and ended
            # in the nighttime period
hour_max = 22
value = 0.36 + ((hour_max - nem_start_time) * 60) * 0.09
value = value + 0.36
else:
if not 6 < nem_end_time < 22:
            # So the call was completed entirely within the nighttime period
value = 0.36
else:
            # So the call started in the nighttime period and ended
            # in the daytime period
hour_min = 6
value = 0.36 + ((nem_end_time - hour_min) * 60) * 0.09
value = value + 0.36
return value
|
666883348347e8408b087ac63acd8608ff589a1c
| 706,232 |