content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M)
---|---|---|
def get_prev_and_next_lexemes(request, current_lexeme):
"""Get the previous and next lexeme from the same language, ordered
by meaning and then alphabetically by form"""
lexemes = list(Lexeme.objects.filter(
language=current_lexeme.language).order_by(
"meaning", "phon_form", "romanised", "id"))
ids = [l.id for l in lexemes]
try:
current_idx = ids.index(current_lexeme.id)
except ValueError:
current_idx = 0
prev_lexeme = lexemes[current_idx - 1]
try:
next_lexeme = lexemes[current_idx + 1]
except IndexError:
next_lexeme = lexemes[0]
return (prev_lexeme, next_lexeme) | ca33f582049d055d35196595fd0c23a06fb0d791 | 3,656,275 |
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [
Index(com.try_sort(x)) if not isinstance(x, Index) else x
for x in indexes
]
kinds.remove(list)
else:
return indexes, "list"
if len(kinds) > 1 or Index not in kinds:
return indexes, "special"
else:
return indexes, "array" | 1c158934d49270fb17d99477082c49b7839c1fbb | 3,656,276 |
def get_tetranuc_freqs(given_seq):
"""
Returns dictionary mapping each of the 4^4 = 256 possible tetranucleotides
to its observed frequency in the given sequence.
Args:
given_seq:
Returns:
"""
return {tetranuc : get_observed_oligonuc_freq(given_seq, tetranuc) for tetranuc in TETRANUCLEOTIDES} | b2279248961b747526bedb14a7fcddf7015fde45 | 3,656,277 |
from typing import Optional
def _prv_keyinfo_from_wif(
wif: String, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PrvkeyInfo:
"""Return private key tuple(int, compressed, network) from a WIF.
WIF is always compressed and includes network information:
here the 'network, compressed' input parameters are passed
only to allow consistency checks.
"""
if isinstance(wif, str):
wif = wif.strip()
payload = b58decode(wif)
net = network_from_key_value("wif", payload[:1])
if net is None:
raise BTClibValueError(f"invalid wif prefix: {payload[:1]!r}")
if network is not None and net != network:
raise BTClibValueError(f"not a {network} wif: {wif!r}")
ec = NETWORKS[net].curve
if len(payload) == ec.n_size + 2: # compressed WIF
compr = True
if payload[-1] != 0x01: # must have a trailing 0x01
raise BTClibValueError("not a compressed WIF: missing trailing 0x01")
prv_key = payload[1:-1]
elif len(payload) == ec.n_size + 1: # uncompressed WIF
compr = False
prv_key = payload[1:]
else:
raise BTClibValueError(f"wrong WIF size: {len(payload)}")
if compressed is not None and compr != compressed:
raise BTClibValueError("compression requirement mismatch")
q = int.from_bytes(prv_key, byteorder="big")
if not 0 < q < ec.n:
raise BTClibValueError(f"private key {hex(q)} not in [1, n-1]")
return q, net, compr | d9eef56ea212fafcd7aa5af718aa0b1280e9555d | 3,656,279 |
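A sketch of the payload layout that the length checks above distinguish, for the compressed branch (ec.n_size == 32 on secp256k1). The 0x80 mainnet prefix and the all-zero key bytes are purely illustrative:

```python
# Illustration only: a base58-decoded, checksum-stripped WIF payload (compressed case).
payload = bytes([0x80]) + bytes(32) + bytes([0x01])   # prefix + 32-byte key + compression flag
prefix, prv_key, flag = payload[:1], payload[1:-1], payload[-1]
assert len(payload) == 32 + 2 and flag == 0x01        # matches the "compressed WIF" branch above
```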
def build_cmake_defines(args, dirs, env_vars, stage):
"""
Generate cmake defines
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param env_vars: An instance of the EnvVars class with the compilers/linker to use
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
# Get slim defines if we are not building a full toolchain
if not args.full_toolchain:
defines.update(slim_cmake_defines())
# Add compiler/linker defines, which change based on stage
defines.update(cc_ld_cmake_defines(dirs, env_vars, stage))
# Add distribution specific defines
defines.update(distro_cmake_defines())
# Add project and target defines, which change based on stage
defines.update(project_cmake_defines(args, stage))
defines.update(target_cmake_defines(args, stage))
# Add other stage specific defines
defines.update(stage_specific_cmake_defines(args, dirs, stage))
# Add the vendor string if necessary
if args.clang_vendor:
defines['CLANG_VENDOR'] = args.clang_vendor
# Removes system dependency on terminfo to keep the dynamic library dependencies slim
defines['LLVM_ENABLE_TERMINFO'] = 'OFF'
return defines | 227fb680e42786356adbace344cea98433a29aab | 3,656,280 |
def server() -> None:
"""Старт сервера"""
class PredictionServicer(predictions_pb2_grpc.PredictionServicer):
def PredictIris(self, request, context):
response = predictions_pb2.PredictResponse()
response.iris_type = predictions.predict_iris(request.sepal_length,
request.sepal_width,
request.petal_length,
request.petal_width)
return response
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
predictions_pb2_grpc.add_PredictionServicer_to_server(PredictionServicer(), server)
print('Server started. Port 50051.')
server.add_insecure_port('[::]:50051')
# CTRL+C
server.start()
server.wait_for_termination() | eaa71a36763ffee0d6b201e0900b4f1fcf397fe9 | 3,656,281 |
def wasLastResponseHTTPError():
"""
Returns True if the last web request resulted in an erroneous HTTP code (like 500)
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError and threadData.lastHTTPError[0] == threadData.lastRequestUID | cbe2a21752387cfb5b0cba41ecc3bdbacbcdcbb3 | 3,656,282 |
from sqlmodel import select
async def update_rates(
user_id: str = None,
client_id: str = None,
new_amount: str = None,
session: Session = Depends(get_session),
):
"""Update a rate."""
statement = (
select(Rate)
.where(Rate.user_id == user_id)
.where(Rate.client_id == client_id)
.where(Rate.is_active == True)
)
rate_to_update = session.exec(statement).one()
rate_to_update.amount = new_amount
session.add(rate_to_update)
session.commit()
session.refresh(rate_to_update)
return True | c5ef142dda27f27217d71ed811ce8b6f049a0d98 | 3,656,283 |
def taillight_detect(image):
""" Takes in a road image, re-sizes it for the CNN model,
predicts a taillight mask, combines the mask with Canny edges
and directional flood-fill heuristics, and returns the resulting
binary taillight mask.
"""
model = load_model('full_CNN_model.h5')
#image1=image
#image1=np.array(image1)
#objects=np.squeeze(image,2)
#rows,cols=objects.shape
rows, cols,_ = image.shape
#cols, rows = image.size
#cols=160
#rows=80
# Get image ready for feeding into model
small_img = cv2.resize(image, (160, 80))
#img_y_cr_cb = cv2.cvtColor(small_img, cv2.COLOR_BGR2YCrCb)
#y, cr, cb = cv2.split(img_y_cr_cb)
# Applying equalize Hist operation on Y channel.
#y_eq = cv2.equalizeHist(y)
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
#y_eq = clahe.apply(y)
#img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
#small_img = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
#small_img = imresize(image, (80, 160, 3))
small_img = np.array(small_img)
small_img = small_img[None,:,:,:]
# Make prediction with neural network (un-normalize value by multiplying by 255)
prediction = model.predict(small_img)[0] * 255
#new_image = imresize(prediction, (rows, cols, 3))
mask = cv2.resize(prediction, (cols, rows))
img_y_cr_cb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
y, cr, cb = cv2.split(img_y_cr_cb)
# Applying equalize Hist operation on Y channel.
#y_eq = cv2.equalizeHist(y)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
y_eq = clahe.apply(y)
img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
image_he = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
gray = cv2.cvtColor(image_he, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
auto = auto_canny(blurred)
for i in range(rows):
for j in range(cols):
if auto[i,j] > 0 and mask[i,j] > 100:
auto[i,j]=255
else:
auto[i,j]=0
cv2.imshow('histogram equalisation', auto)
cv2.waitKey(0)
#h, w = edges.shape[:2]
filled_from_bottom = np.zeros((rows, cols))
for col in range(cols):
for row in reversed(range(rows)):
if auto[row][col] < 255: filled_from_bottom[row][col] = 255
else: break
filled_from_top = np.zeros((rows, cols))
for col in range(cols):
for row in range(rows):
if auto[row][col] < 255: filled_from_top[row][col] = 255
else: break
filled_from_left = np.zeros((rows, cols))
for row in range(rows):
for col in range(cols):
if auto[row][col] < 255: filled_from_left[row][col] = 255
else: break
filled_from_right = np.zeros((rows, cols))
for row in range(rows):
for col in reversed(range(cols)):
if auto[row][col] < 255: filled_from_right[row][col] = 255
else: break
for i in range(rows):
for j in range(cols):
if filled_from_bottom[i,j] ==0 and filled_from_top[i,j]==0 and filled_from_right[i,j] ==0 and filled_from_left[i,j]==0:
auto[i,j]=255
else:
auto[i,j]=0
kernel = np.ones((5,5),np.uint8)
opening = cv2.morphologyEx(auto, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(auto, cv2.MORPH_CLOSE, kernel)
mask = np.expand_dims(mask, 2)
mask = np.repeat(mask, 3, axis=2) # give the mask the same shape as your image
colors = {"red": [0.0,1.0,1.0], "blue": [0.,0.,0.1]} # a dictionary for your colors, experiment with the values
colored_mask = np.multiply(mask, colors["red"]) # broadcast multiplication (thanks to the multiplication by 0, you'll end up with values different from 0 only on the relevant channels and the right regions)
image = image + colored_mask # element-wise sum (since image and mask have the same shape)
#return image.astype(float) / 255
#return new_image
return auto | ee8849b59e94f8c395211af3537310ad7d2d8999 | 3,656,285 |
def generate_random_number(rng, length):
"""Return random number with predefined length."""
return crypto.random_generate(rng, length) | 2f3f5f290948c3eb063b46353a01a5edc17599e4 | 3,656,286 |
import ftplib
import tarfile
def update_old_names():
"""Fetches the list of old tz names and returns a mapping"""
url = urlparse(ZONEINFO_URL)
log.info('Connecting to %s' % url.netloc)
ftp = ftplib.FTP(url.netloc)
ftp.login()
gzfile = BytesIO()
log.info('Fetching zoneinfo database')
ftp.retrbinary('RETR ' + url.path, gzfile.write)
gzfile.seek(0)
log.info('Extracting backwards data')
archive = tarfile.open(mode="r:gz", fileobj=gzfile)
backward = {}
for line in archive.extractfile('backward').readlines():
if line.startswith(b'#'):
continue
if len(line.strip()) == 0:
continue
parts = line.split()
if parts[0] != b'Link':
continue
backward[parts[2].decode('ascii')] = parts[1].decode('ascii')
return backward | a10f5985ea6fe6709816e757ee764138735eb077 | 3,656,287 |
from typing import Optional
def get_namespace(location: Optional[str] = None,
namespace_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
"""
Gets a namespace.
"""
__args__ = dict()
__args__['location'] = location
__args__['namespaceId'] = namespace_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:servicedirectory/v1:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
return AwaitableGetNamespaceResult(
labels=__ret__.labels,
name=__ret__.name) | 70f59b6eb48e4952d19c5b96b9579f13c0e569fd | 3,656,288 |
def build_headers(access_token, client_id):
"""
:param access_token: Access token granted when the user links their account
:param client_id: This is the api key for your own app
:return: Dict of headers
"""
return {'Content-Type': 'application/json',
'Authorization': f'Bearer {access_token}',
'trakt-api-version': '2',
'trakt-api-key': client_id} | 5cd8ae3e06f67b7a4fdb1644ae82c62cb54479cb | 3,656,289 |
def odd_subgraph_centrality(i, lam, u):
"""
Calculates the number of odd length closed walks that a node participates in :cite:`estrada2005spectral`.
Used in the calculation of spectral scaling and generalized robustness index.
:param i: node index
:param lam: array of adjacency-matrix eigenvalues
:param u: matrix of the corresponding eigenvectors (u[i, j] is component i of eigenvector j)
:return: a float
"""
sc = 0
for j in range(len(lam)):
sc += np.power(u[i, j], 2) * np.sinh(lam[j])
return sc | eeb141ac56d9b70294bbf62a24739c73f3e4755e | 3,656,291 |
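In formula form, the loop above evaluates the odd subgraph centrality of node i from the full eigendecomposition of the adjacency matrix (eigenvalues lambda_j, eigenvector components u_ij):

```latex
SC_{\mathrm{odd}}(i) \;=\; \sum_{j} u_{ij}^{2}\,\sinh(\lambda_j)
```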
def PolyAreasToModel(polyareas, bevel_amount, bevel_pitch, quadrangulate):
"""Convert a PolyAreas into a Model object.
Assumes polyareas are in xy plane.
Args:
polyareas: geom.PolyAreas
bevel_amount: float - if > 0, amount of bevel
bevel_pitch: float - if > 0, angle in radians of bevel
quadrangulate: bool - should n-gons be quadrangulated?
Returns:
geom.Model
"""
m = geom.Model()
if not polyareas:
return m
polyareas.points.AddZCoord(0.0)
m.points = polyareas.points
for pa in polyareas.polyareas:
PolyAreaToModel(m, pa, bevel_amount, bevel_pitch, quadrangulate)
return m | c2243bca3d3cfa1168bde94dfe078d6cf3e86ad4 | 3,656,292 |
def preprocessing(train_data, test_data):
"""
* The method at first eliminates constant features from both train and test data.
* Then, it splits training data into features and labels.
* Finally, the method performs pca on training and testing data sets to reduce the dimension and
overcome curse of dimensionality problem.
Parameters
----------
train_data: training data set in data frame format
test_data: testing data set in data frame format
"""
# constant feature elimination
train_data = train_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1)
train_data = np.asarray(train_data)
test_data = test_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1)
test_data = np.asarray(test_data)
# training data is split into features and labels
train_x = train_data[:, :train_data.shape[1] - 1]
train_y = train_data[:, train_data.shape[1] - 1]
train_y.shape = (np.size(train_y), 1)
# principal component analysis
pca = PCA(n_components=60)
train_x_pca = pca.fit_transform(train_x)
test_pca = pca.transform(test_data)
return train_x_pca, train_y, test_pca | 9f6c01d64d393c9c9fe51925f11842b63098471f | 3,656,293 |
def generate_videos_from_events(response, video_model):
"""Creates the video containers/representations for this given response.
We should only really invoke this as part of a migration as of right now (2/8/2019),
but it's quite possible we'll have the need for dynamic upsertion later.
"""
seen_ids = set()
video_objects = []
Video = video_model
# Using a constructive approach here, but with an ancillary seen_ids list b/c Django models without
# primary keys are unhashable for some dumb reason (even though they have unique fields...)
for frame_id, event_data in response.exp_data.items():
if event_data.get("videoList", None) and event_data.get("videoId", None):
# We've officially captured video here!
events = event_data.get("eventTimings", [])
for event in events:
video_id = event["videoId"]
pipe_name = event["pipeId"] # what we call "ID" they call "name"
stream_time = event["streamTime"]
if (
video_id not in seen_ids
and pipe_name
and stream_time
and stream_time > 0
):
# Try looking for the regular ID first.
file_obj = S3_RESOURCE.Object(
settings.BUCKET_NAME, f"{video_id}.mp4"
)
try:
s3_response = file_obj.get()
except ClientError:
try: # If that doesn't work, use the pipe name.
file_obj = S3_RESOURCE.Object(
settings.BUCKET_NAME, f"{pipe_name}.mp4"
)
s3_response = file_obj.get()
except ClientError:
logger.warning(
f"could not find {video_id} or {pipe_name} in S3!"
)
continue
# Read first 32 bytes from streaming body (file header) to get actual filetype.
streaming_body = s3_response["Body"]
file_header_buffer: bytes = streaming_body.read(32)
file_info = fleep.get(file_header_buffer)
streaming_body.close()
video_objects.append(
Video(
pipe_name=pipe_name,
created_at=date_parser.parse(event["timestamp"]),
date_modified=s3_response["LastModified"],
# Can't get the *actual* pipe id property, it's in the webhook payload...
frame_id=frame_id,
full_name=f"{video_id}.{file_info.extension[0]}",
study=response.study,
response=response,
is_consent_footage=frame_id in VALID_CONSENT_FRAMES,
)
)
seen_ids.add(video_id)
return Video.objects.bulk_create(video_objects) | f5669fbc6466bf3cf1671d04a48bad4c5975f216 | 3,656,294 |
def datetime_at_midnight(dt: DateTime, tz: TimeZone) -> DateTime:
""" Returns a DateTime for the requested DateTime at midnight in the specified time zone.
Args:
dt (DateTime): the DateTime for which the new value at midnight should be calculated
tz (TimeZone): the TimeZone to use when interpreting the DateTime
Returns:
DateTime
Raises:
DHError
"""
try:
return _JDateTimeUtils.dateAtMidnight(dt, tz.value)
except Exception as e:
raise DHError(e) from e | 141988c9943911d165f5f3f8ade5536ae65881f2 | 3,656,295 |
def convert2sametype(dict_, formula):
"""Utility function for internal use.
Convert string/dict/DataFrame to dict
Parameters
----------
dict_ : dict
formula : string/dict/DataFrame
Returns
-------
type(formula)
"""
return convert2type(dict_, type(formula)) | d7393668e5bd22e8482bf4b99c6a789d322b80fb | 3,656,297 |
from typing import List
import gzip
def from_sdf(sdf_content: str = None, file_path: str = None, ignore_hydrogens = False) -> List[Graph]:
"""
parse graph from_sdf
Read chemical files and parses them into instances of `Graph`.
As this function is not meant to be called in a loop,
inner functions only relative to chemical files parsing are declared.
Type Aliases :
Atom = str
Bond = List[str]
"""
if file_path :
if (file_path.endswith('.gz')):
fp = gzip.open(file_path, 'rt', encoding='utf-8')
sdf_content = fp.read()
else :
with open(file_path, 'r') as content_file:
sdf_content = content_file.read()
return [
Mol_to_Graph(mol[0], mol[1])
for mol
in [
parse_Mol(mol_file, ignore_hydrogens)
for mol_file
in [
part[0]
for part
in [
compound.split('M END')
for compound
in sdf_content.split("$$$$")
if (compound.strip(' \t\n\r') != '')
]
if is_Mol(part)
]
]
] | 5676b98a699cfed00767f4d51dec27a7dc1a94ad | 3,656,298 |
from typing import Callable
def dispatcher_connect(
opp: OpenPeerPower, signal: str, target: Callable[..., None]
) -> Callable[[], None]:
"""Connect a callable function to a signal."""
async_unsub = run_callback_threadsafe(
opp.loop, async_dispatcher_connect, opp, signal, target
).result()
def remove_dispatcher() -> None:
"""Remove signal listener."""
run_callback_threadsafe(opp.loop, async_unsub).result()
return remove_dispatcher | 3dca8d6cf1f581a409c2b64e6c9a88e543fe0615 | 3,656,299 |
def get_last_error():
""" Get the last error value, then turn it into a nice string. Return the string. """
error_id = kernel32.GetLastError()
# No actual error
if error_id == 0:
return None
# Gonna need a string pointer
buf = ffi.new("LPWSTR")
chars = kernel32.FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, ffi.NULL, error_id , 0, buf, 0, ffi.NULL)
return ffi.string(ffi.cast("char **",buf)[0][0:chars]).decode('utf-8').strip("\r\n") | 424da4211cc5cb19c8143ffe5b4326b2ff440319 | 3,656,300 |
import pickle
def load_params_from_pkl(params_dump_file_path):
"""
Loads parameters from a pickle _dump file.
:param params_dump_file_path: self-explanatory
:return dict of param_name => param
"""
coll = {}
f = open(params_dump_file_path, 'rb')
while True:
try:
param_name, param_val = pickle.load(f)
coll[param_name] = param_val
except (EOFError, pickle.UnpicklingError):
break
f.close()
return coll | e0205d3f4b3d1ac5859eb91424a041273fc23cb8 | 3,656,301 |
def plot_by_term(term, df, kind='go', q=0.1, swarm=True,
x='genotype', y='b', gene='ens_gene'):
"""
Plot ontology terms by a given column.
Params:
term - term to look for in melted_df
df - a tidy dataframe with columns x and y
kind - the ontology to use
q - q-value for statistical significance. defaults to 0.1
swarm - if True, plots a swarmplot. Else, plots a violinplot.
x - column to plot on x axis
y - column to plot on y axis
gene - column in the given df where gene WBIDs are provided
Output:
ax - an axis object containing a graph
genes - a list of genes obtained from the melted df
"""
if type(kind) is not str:
raise ValueError('`kind` variable must be a string.')
if kind.lower() not in ['tissue', 'phenotype', 'go']:
raise ValueError('`kind` must be one of `tissue`, `phenotype` or `go`')
if type(term) is not str:
raise ValueError('`term` must be a string.')
if kind.lower() == 'tissue':
onto_df = tea.fetch_dictionary()
elif kind.lower() == 'phenotype':
onto_df = pd.read_csv('../input/phenotype_ontology.csv')
else:
onto_df = pd.read_csv('../input/go_dictionary.csv')
# melt the df:
melted_df = pd.melt(onto_df, id_vars='wbid', var_name='term',
value_name='expressed')
melted_df = melted_df[melted_df.expressed == 1]
# warnings and bells:
sel = melted_df.term.str.contains(term)
if len(melted_df[sel].term.unique()) > 1:
print('Warning: Provided term matches more than one ontological term.')
genes = melted_df[sel].wbid
if len(genes) == 0:
raise ValueError('Provided term is not in ontology dictionary')
ind = (df.qval < q) & (df[gene].isin(genes))
fig, ax = plt.subplots()
if swarm:
ax = sns.swarmplot(x=x, y=y, data=df[ind])
else:
ax = sns.violinplot(x=x, y=y, data=df[ind])
return ax, genes | 768b59ff479468902af429bdf8603455bad1eab3 | 3,656,303 |
def lab_equality(lab1, lab2):
"""
Check if two labs are identical
"""
if lab1["ncolumns"] != lab1["ncolumns"] or lab1["nlines"] != lab2["nlines"]:
return False
return all(set(lab1[cell]) == set(lab2[cell]) for cell in lab1.keys() if type(cell) != type("a")) | d5ffca9acfa6bc2cc324f1b6c5ed416541812c13 | 3,656,304 |
import attrs
def read_wwm(filename_or_fileglob, chunks={}, convert_wind_vectors=True):
"""Read Spectra from SWAN native netCDF format.
Args:
- filename_or_fileglob (str): filename or fileglob specifying multiple
files to read.
- chunks (dict): chunk sizes for dimensions in dataset. By default
dataset is loaded using single chunk for all dimensions (see
xr.open_mfdataset documentation).
- convert_wind_vectors (bool): choose it to convert wind vectors into
speed / direction data arrays.
Returns:
- dset (SpecDataset): spectra dataset object read from WWM file.
Note:
- If file is large to fit in memory, consider specifying chunks for
'time' and/or 'station' dims.
"""
dset = xr.open_mfdataset(filename_or_fileglob, chunks=chunks)
_units = dset.AC.attrs.get("units", "")
dset = dset.rename(
{
"nfreq": attrs.FREQNAME,
"ndir": attrs.DIRNAME,
"nbstation": attrs.SITENAME,
"AC": attrs.SPECNAME,
"lon": attrs.LONNAME,
"lat": attrs.LATNAME,
"DEP": attrs.DEPNAME,
"ocean_time": attrs.TIMENAME,
}
)
# Calculating wind speeds and directions
if convert_wind_vectors and "Uwind" in dset and "Vwind" in dset:
dset[attrs.WSPDNAME], dset[attrs.WDIRNAME] = uv_to_spddir(
dset["Uwind"], dset["Vwind"], coming_from=True
)
# Setting standard names and storing original file attributes
set_spec_attributes(dset)
dset[attrs.SPECNAME].attrs.update(
{"_units": _units, "_variable_name": attrs.SPECNAME}
)
# Assigning spectral coordinates
dset[attrs.FREQNAME] = dset.spsig / (2 * np.pi) # convert rad to Hz
dset[attrs.DIRNAME] = dset.spdir
# converting Action to Energy density and adjust density to Hz
dset[attrs.SPECNAME] = dset[attrs.SPECNAME] * dset.spsig * (2 * np.pi)
# Converting from radians
dset[attrs.DIRNAME] *= R2D
dset[attrs.SPECNAME] /= R2D
# Returns only selected variables, transposed
to_drop = [
dvar
for dvar in dset.data_vars
if dvar
not in [
attrs.SPECNAME,
attrs.WSPDNAME,
attrs.WDIRNAME,
attrs.DEPNAME,
attrs.LONNAME,
attrs.LATNAME,
]
]
dims = [d for d in ["time", "site", "freq", "dir"] if d in dset.efth.dims]
return dset.drop(to_drop).transpose(*dims) | 4623a31a0d3d780960d58743278a847377213555 | 3,656,305 |
def is_sorted(t):
"""Checks whether a list is sorted.
t: list
returns: boolean
"""
return t == sorted(t) | 442c5a4670c595f3dea45c8aac315eda5dae26d0 | 3,656,306 |
def create_container_port_mappings(container):
"""
Create the port mappings for the given container.
:param container: The container to create the mappings for.
"""
ports = []
image = None
if container.is_image_based():
image = container.image
elif container.is_clone() and container.clone_of.is_image_based():
image = container.clone_of.image
if image:
protected_port = image.protected_port
public_ports = image.public_ports
if protected_port:
mapping = PortMapping(
server=container.server,
container=container,
external_port=PortMapping.get_available_server_port(container.server),
internal_port=protected_port
)
mapping.save()
ports.append({
ContainerBackend.PORT_MAPPING_KEY_ADDRESS: mapping.server.internal_ip,
ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
})
if public_ports:
for port in public_ports.split(','):
mapping = PortMapping(
server=container.server,
container=container,
external_port=PortMapping.get_available_server_port(container.server),
internal_port=port
)
mapping.save()
ports.append({
ContainerBackend.PORT_MAPPING_KEY_ADDRESS: '0.0.0.0',
ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
})
return ports | 15a93e38ccb2c3d6ecab025a8d3c9226ebbf81d0 | 3,656,307 |
from typing import Callable
from typing import Any
from typing import get_origin
from typing import Union
from typing import Dict
from typing import Sequence
from typing import Set
import time
from typing import Pattern
from typing import IO
from typing import Literal
from enum import Enum
def get_caster(typehint: TypeHint) -> Callable[..., Any]:
"""Returns a conversion class most appropriate for the
supplied type hint. Potential matches are checked in
order from most to least specific to account for
overlapping types (e.g. ABCs).
"""
if typehint in (Any, None):
return untyped_caster
origin = get_origin(typehint)
if origin in (Union, UnionType):
return union_caster(typehint)
typecasters: Dict[TypeHintTuple, Callable[..., Any]] = {
(bytes,): str.encode,
(str,): str,
(dict,): json_caster(typehint),
(bool,): bool_caster,
(Sequence, Set): collection_caster(typehint),
(date, time): datetime_caster(typehint),
(Pattern,): pattern_caster(typehint),
(IO, IOBase): io_caster(typehint),
(Literal,): literal_caster(typehint),
(Enum,): enum_caster(typehint),
}
for cls, caster in typecasters.items():
if typehint in cls:
return caster
if origin in cls and origin is not None:
return caster
if issubtype(typehint, cls):
return caster
return generic_caster(typehint) | 7171e70c2870169a5394d3bafc4d114f4a950db0 | 3,656,309 |
def values(series):
"""Count the values and sort.
series: pd.Series
returns: series mapping from values to frequencies
"""
return series.value_counts(dropna=False).sort_index() | d4ef6b93b7f2790d8130ac045e9c315b8d57a245 | 3,656,310 |
def use_id(type):
"""Declare that this configuration option should point to an ID with the given type."""
def validator(value):
check_not_templatable(value)
if value is None:
return core.ID(None, is_declaration=False, type=type)
if (
isinstance(value, core.ID)
and value.is_declaration is False
and value.type is type
):
return value
return core.ID(validate_id_name(value), is_declaration=False, type=type)
return validator | 0087ad7119999932c9d4b882907019f60346491f | 3,656,311 |
def social_auth_user(backend, uid, user=None, *args, **kwargs):
"""Return UserSocialAuth account for backend/uid pair or None if it
doesn't exists.
Raise AuthAlreadyAssociated if UserSocialAuth entry belongs to another
user.
"""
social_user = UserSocialAuth.get_social_auth(backend.name, uid)
if social_user:
if user and social_user.user != user:
msg = ugettext('This %(provider)s account is already in use.')
raise AuthAlreadyAssociated(backend, msg % {
'provider': backend.name
})
elif not user:
user = social_user.user
return {'social_user': social_user, 'user': user} | 5a421e3f1f24cecbb4e6313bee8172585f9f3708 | 3,656,312 |
def bbox_mask(t_arr, x_arr, limits):
"""
Just a wrapper for np.where
"""
#NOTE: t_arr is included but no longer used
mask = np.where(
(x_arr >= limits[0]) & \
(x_arr <= limits[1]))[0]
return mask | 90e1a92d76d1b3d406ab50300c6528973f610f0a | 3,656,313 |
import numpy as np
from numpy import argsort, array, isfinite, linspace, meshgrid
from matplotlib.pyplot import gca
def kdeplot_2d_clevels(xs, ys, levels=11, **kwargs):
""" Plot contours at specified credible levels.
Arguments
---------
xs: array
samples of the first variable.
ys: array
samples of the second variable, drawn jointly with `xs`.
levels: float, array
if float, interpreted as number of credible levels to be equally
spaced between (0, 1); if array, interpreted as list of credible
levels.
xlow: float
lower bound for abscissa passed to Bounded_2d_kde (optional).
xhigh: float
upper bound for abscissa passed to Bounded_2d_kde (optional).
ylow: float
lower bound for ordinate passed to Bounded_2d_kde (optional).
yhigh: float
upper bound for ordinate passed to Bounded_2d_kde (optional).
ax: Axes
matplotlib axes on which to plot (optional).
kwargs:
additional arguments passed to plt.contour().
"""
try:
xs = xs.values.astype(float)
ys = ys.values.astype(float)
except AttributeError:
pass
if all(~isfinite(xs)) or all(~isfinite(ys)):
return None
try:
len(levels)
f = 1 - np.array(levels)
except TypeError:
f = linspace(0, 1, levels+2)[1:-1]
if kwargs.get('auto_bound', False):
kwargs['xlow'] = min(xs)
kwargs['xhigh'] = max(xs)
kwargs['ylow'] = min(ys)
kwargs['yhigh'] = max(ys)
kde_kws = {k: kwargs.pop(k, None) for k in ['xlow', 'xhigh', 'ylow', 'yhigh']}
k = Bounded_2d_kde(np.column_stack((xs, ys)), **kde_kws)
size = max(10*(len(f)+2), 500)
c = np.random.choice(len(xs), size=size)
p = k(np.column_stack((xs[c], ys[c])))
i = argsort(p)
l = array([p[i[int(round(ff*len(i)))]] for ff in f])
Dx = np.percentile(xs, 99) - np.percentile(xs, 1)
Dy = np.percentile(ys, 99) - np.percentile(ys, 1)
x = linspace(np.percentile(xs, 1)-0.1*Dx, np.percentile(xs, 99)+0.1*Dx, 128)
y = linspace(np.percentile(ys, 1)-0.1*Dy, np.percentile(ys, 99)+0.1*Dy, 128)
XS, YS = meshgrid(x, y, indexing='ij')
ZS = k(np.column_stack((XS.flatten(), YS.flatten()))).reshape(XS.shape)
ax = kwargs.pop('ax', gca())
kwargs['colors'] = kwargs.get('colors', [kwargs.pop('color', None),])
ax.contour(XS, YS, ZS, levels=l, **kwargs) | 806b4278a6bbac91fcdfd6354cb3fa5422fab1ee | 3,656,314 |
def normalization_reg_loss(input):
"""
input: [..., 3]
It computes the length of each vector and uses the L2 loss between the lengths and 1.
"""
lengths = (input ** 2).sum(dim=-1).sqrt()
loss_norm_reg = ((lengths - 1) ** 2).mean()
return loss_norm_reg | 3b9d999c90d8e9b3ce797d286bb2f0b215fa7ee5 | 3,656,315 |
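Equivalently, for a batch of K vectors v_k in R^3 the regularizer above is:

```latex
\mathcal{L}_{\mathrm{norm}} \;=\; \frac{1}{K}\sum_{k=1}^{K}\bigl(\lVert v_k\rVert_2 - 1\bigr)^{2}
```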
def _get_window_size(offset, step_size, image_size):
"""
Calculate window width or height.
Usually same as block size, except when at the end of image and only a
fracture of block size remains
:param offset: start columns/ row
:param step_size: block width/ height
:param image_size: image width/ height
:return: window width/ height
"""
if offset + step_size > image_size:
return image_size - offset
else:
return step_size | 90d65229c54a5878fa9b2af8e30293e743679e42 | 3,656,316 |
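A quick illustration of the edge handling when tiling an image dimension in fixed-size blocks (hypothetical sizes):

```python
# Tiling a 1000-px dimension in 256-px blocks: only the last window shrinks.
print([_get_window_size(offset, 256, 1000) for offset in range(0, 1000, 256)])
# -> [256, 256, 256, 232]
```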
def _ListCtrl_IsSelected(self, idx):
"""
Returns ``True`` if the item is selected.
"""
return (self.GetItemState(idx, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED) != 0 | 796916c4cf13e77ec7f21cae2210acbb6d250e14 | 3,656,317 |
def sturm_liouville_function(x, y, p, p_x, q, f, alpha=0, nonlinear_exp=2):
"""Second order Sturm-Liouville Function defining y'' for Lu=f.
This first-order form is used because it is what SciPy's solve_ivp expects.
Keyword arguments:
x -- independent variable
y -- dependent variable
p -- p(x) parameter
p_x -- derivative of p_x wrt x
q -- q(x) parameter
f -- forcing function f(x)
alpha -- nonlinear parameter
nonlinear_exp -- exponent of nonlinear term
"""
y_x = y[1]
y_xx = -1*(p_x/p)*y[1] + (q/p)*y[0] + (q/p)*alpha*y[0]**nonlinear_exp - f/p
return [y_x, y_xx] | 5c34cc622075c640fe2dec03b1ae302192d0f779 | 3,656,318 |
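Written out, the returned system encodes the nonlinearly perturbed Sturm-Liouville equation (n being `nonlinear_exp`), rearranged so that y'' is isolated as solve_ivp expects:

```latex
\frac{d}{dx}\!\bigl(p(x)\,y'\bigr) - q(x)\bigl(y + \alpha\,y^{n}\bigr) = -f(x)
\quad\Longleftrightarrow\quad
y'' = -\frac{p'}{p}\,y' + \frac{q}{p}\,y + \frac{q}{p}\,\alpha\,y^{n} - \frac{f}{p}
```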
def hamming_set(index: str, d: int = 1, include_N: bool = True):
"""Given an index of bases in {ACGTN}, generate all indexes within hamming
distance d of the input
:param index: string representing the index sequence
:param d: maximum distance to allow
:param include_N: include N when generating possible indexes
:return: set of indexes within hamming distance d
"""
base_d = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}
new_base = [i * np.eye(len(index), dtype=np.uint8) for i in range(4 + include_N)]
other_bases = 1 - np.eye(len(index), dtype=np.uint8)
h_set = {tuple(base_d[c] for c in index)}
for _ in range(d):
for a in list(map(np.array, h_set)):
h_set.update(t for i in new_base for t in map(tuple, a * other_bases + i))
h_set = {"".join("ACGTN"[i] for i in h) for h in h_set}
return h_set | d8546bd2f7b04518d2d711488045670a60e449fe | 3,656,320 |
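A minimal usage sketch (numpy must be importable, since the function body uses np):

```python
import numpy as np  # required by hamming_set

neighbours = hamming_set("ACGT", d=1)
print("ACGT" in neighbours)   # True  -- the original index is kept
print("ACGA" in neighbours)   # True  -- one substitution away
print("TCGA" in neighbours)   # False -- two substitutions away
print(len(neighbours))        # 17 = 1 original + 4 positions * 4 alternative bases
```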
from mne import read_epochs
def _get_epochs_info(raw_fname):
"""Get epoch info."""
epochs = read_epochs(raw_fname)
return epochs.info | 5a7d20041e7b1de7b4dcf3540f7a9d01575fb8f9 | 3,656,321 |
def is_private(key):
"""
Returns whether or not an attribute is private.
A private attribute looks like: __private_attribute__.
:param key: The attribute key
:return: bool
"""
return key.startswith("__") and key.endswith("__") | 498e7522e95317dbb171961f0f5fe8350c29a69d | 3,656,322 |
async def img(filename) -> Response:
"""Image static endpoint."""
return await send_from_directory("img", filename) | d255f0f11f3b380f332a3165f8917d2d2cb65a6b | 3,656,323 |
def ref_genome_info(info, config, dirs):
"""Retrieve reference genome information from configuration variables.
"""
genome_build = info.get("genome_build", None)
(_, sam_ref) = get_genome_ref(genome_build, config["algorithm"]["aligner"],
dirs["galaxy"])
return genome_build, sam_ref | 382d32bddef76bb1ba1ecd6a4b39c042909ac3ed | 3,656,324 |
def load_text(file_arg):
"""
General function used to load data from a text file
"""
file_handle = validate_file_for_reading(file_arg)
try:
df = pd.io.parsers.read_csv(file_handle,delim_whitespace=True,\
comment='#', skip_blank_lines=True, engine='c')
except:
raise SortSeqError(\
'Could not interpret text file %s as dataframe.'%repr(file_handle))
return df.dropna(axis=0, how='all') | f8681d1db1819f2036f0b6304a04fd1762ad31f8 | 3,656,325 |
def entropy_from_mnemonic(mnemonic: Mnemonic, lang: str = "en") -> BinStr:
"""Convert mnemonic sentence to Electrum versioned entropy."""
# verify that it is a valid Electrum mnemonic sentence
_ = version_from_mnemonic(mnemonic)
indexes = _indexes_from_mnemonic(mnemonic, lang)
entropy = _entropy_from_indexes(indexes, lang)
return entropy | adcdfe3f66150f77276af7b4689289fe7609a253 | 3,656,326 |
def delete_data_analysis(analysis_id: UUID, token: HTTPAuthorizationCredentials = Depends(auth)):
"""
Delete a data analysis record.
You may only delete records in your private space,
or that are associated with a collab of which you are an administrator.
"""
return delete_computation(omcmp.DataAnalysis, analysis_id, token) | bc8cc6ee1174017be1b0ca17f221784163975132 | 3,656,327 |
def get_current_blk_file(current_file_number) -> str:
"""
Returns the current blk file name with file format.
"""
return get_current_file_name(blk_file_format(current_file_number)) | 44c81a5977f42fe38426231421bc3c2b76c36717 | 3,656,328 |
def exec_cmd_status(ceph_installer, commands):
"""
Execute command
Args:
ceph_installer: installer object to exec cmd
commands: list of commands to be executed
Returns:
Boolean
"""
for cmd in commands:
out, err = ceph_installer.exec_command(sudo=True, cmd=cmd)
out, err = out.read().decode().strip(), err.read().decode().strip()
logger.info("Command Response : {} {}".format(out, err))
return True | b5deddf504e1ae0cbc67a5b937d75bb02984b224 | 3,656,329 |
import logging
def BuildIsAvailable(bucket_name, remote_path):
"""Checks whether a build is currently archived at some place."""
logging.info('Checking existence: gs://%s/%s' % (bucket_name, remote_path))
try:
exists = cloud_storage.Exists(bucket_name, remote_path)
logging.info('Exists? %s' % exists)
return exists
except cloud_storage.CloudStorageError:
return False | c1947339a00a538c910e669179d19c986cab5b7e | 3,656,330 |
def _channel_name(row, prefix="", suffix=""):
"""Formats a usable name for the repeater."""
length = 16 - len(prefix)
name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
if suffix:
length = 16 - len(suffix)
name = ("{:%d.%d}" % (length, length)).format(name) + suffix
return name | 4452670e28b614249fb184dd78234e52ee241086 | 3,656,331 |
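A quick illustration of the truncation and suffix handling, using a hypothetical repeater record with the CALL and CITY columns the function reads:

```python
row = {"CALL": "W1ABC", "CITY": "Springfield"}   # hypothetical CSV record
_channel_name(row)                 # -> 'W1ABC Springfiel'  (clipped to 16 characters)
_channel_name(row, prefix="R-")    # -> 'R-W1ABC Springfi'  (prefix eats into the 16-char budget)
_channel_name(row, suffix="+")     # -> 'W1ABC Springfie+'  (name clipped to 15, then suffix appended)
```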
def wordsinunit(unit):
"""Counts the words in the unit's source and target, taking plurals into
account. The target words are only counted if the unit is translated."""
(sourcewords, targetwords) = (0, 0)
if isinstance(unit.source, multistring):
sourcestrings = unit.source.strings
else:
sourcestrings = [unit.source or ""]
for s in sourcestrings:
sourcewords += wordcount(s)
if not unit.istranslated():
return sourcewords, targetwords
if isinstance(unit.target, multistring):
targetstrings = unit.target.strings
else:
targetstrings = [unit.target or ""]
for s in targetstrings:
targetwords += wordcount(s)
return sourcewords, targetwords | 57f6be28eab17ee2bd2cd31783809bd8a413c09e | 3,656,332 |
def check_instance(arg, types, allow_none=False, message='Argument "%(string)s" is not of type %(expected)s, but of type %(actual)s', level=1):
"""
>>> check_instance(1, int)
1
>>> check_instance(3.5, float)
3.5
>>> check_instance('hello', str)
'hello'
>>> check_instance([1, 2, 3], list)
[1, 2, 3]
>>> check_instance(1, (int, float))
1
>>> check_instance(3.5, (int, float))
3.5
>>> check_instance('hello', (str, list))
'hello'
>>> check_instance([1, 2, 3], (str, list))
[1, 2, 3]
>>> check_instance(1, float)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'>
>>> check_instance(3.5, int)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'>
>>> check_instance('hello', list)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'>
>>> check_instance([1, 2, 3], str)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'>
>>> check_instance(1, (list, str))
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'>
>>> check_instance(3.5, (list, str))
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'>
>>> check_instance('hello', (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'>
>>> check_instance([1, 2, 3], (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'>
>>> check_instance(None, int)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'int'>, but of type <class 'NoneType'>
>>> check_instance(None, float)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'float'>, but of type <class 'NoneType'>
>>> check_instance(None, str)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'str'>, but of type <class 'NoneType'>
>>> check_instance(None, list)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'list'>, but of type <class 'NoneType'>
>>> check_instance(None, (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type (<class 'int'>, <class 'float'>), but of type <class 'NoneType'>
>>> check_instance(None, (str, list))
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type (<class 'str'>, <class 'list'>), but of type <class 'NoneType'>
>>> check_instance(1, int, allow_none=True)
1
>>> check_instance(3.5, float, allow_none=True)
3.5
>>> check_instance('hello', str, allow_none=True)
'hello'
>>> check_instance([1, 2, 3], list, allow_none=True)
[1, 2, 3]
>>> check_instance(1, (int, float), allow_none=True)
1
>>> check_instance(3.5, (int, float), allow_none=True)
3.5
>>> check_instance('hello', (str, list), allow_none=True)
'hello'
>>> check_instance([1, 2, 3], (str, list), allow_none=True)
[1, 2, 3]
>>> check_instance(1, float, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'>
>>> check_instance(3.5, int, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'>
>>> check_instance('hello', list, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'>
>>> check_instance([1, 2, 3], str, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'>
>>> check_instance(1, (list, str), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'>
>>> check_instance(3.5, (list, str), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'>
>>> check_instance('hello', (int, float), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'>
>>> check_instance([1, 2, 3], (int, float), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'>
>>> check_instance(None, int, allow_none=True)
>>> check_instance(None, float, allow_none=True)
>>> check_instance(None, str, allow_none=True)
>>> check_instance(None, list, allow_none=True)
>>> check_instance(None, (int, float), allow_none=True)
>>> check_instance(None, (int, float), allow_none=True)
>>> check_instance(None, (str, list), allow_none=True)
>>> check_instance(None, (str, list), allow_none=True)
"""
check(is_instance(arg, types, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg), 'expected': types}, level)
return arg | 362d6101e9f6b88077f8615043713989576c7713 | 3,656,333 |
def spec_lnlike(params, labels, grid_param_list, lbda_obs, spec_obs, err_obs,
dist, model_grid=None, model_reader=None, em_lines={},
em_grid={}, dlbda_obs=None, instru_corr=None,
instru_fwhm=None, instru_idx=None, filter_reader=None,
AV_bef_bb=False, units_obs='si', units_mod='si', interp_order=1):
""" Define the likelihood log-function.
Parameters
----------
params : tuple
Set of models parameters for which the model grid has to be
interpolated.
labels: Tuple of strings
Tuple of labels in the same order as initial_state, that is:
- first all parameters related to loaded models (e.g. 'Teff', 'logg')
- then the planet photometric radius 'R', in Jupiter radius
- (optionally) the flux of emission lines (labels should match those in
the em_lines dictionary), in units of the model spectrum (times mu)
- (optionally) the optical extinction 'Av', in mag
- (optionally) the ratio of total to selective optical extinction 'Rv'
- (optionally) 'Tbb1', 'Rbb1', 'Tbb2', 'Rbb2', etc. for each extra bb
contribution.
grid_param_list : list of 1d numpy arrays/lists OR None
- If list, should contain list/numpy 1d arrays with available grid of
model parameters.
- Set to None for a pure n-blackbody fit, n=1,2,...
- Note1: model grids should not contain grids on radius and Av, but
these should still be passed in initial_state (Av optional).
- Note2: for a combined grid model + black body, just provide
the grid parameter list here, and provide values for 'Tbbn' and 'Rbbn'
in initial_state, labels and bounds.
lbda_obs : numpy 1d ndarray or list
Wavelength of observed spectrum. If several instruments, should be
ordered per instrument, not necessarily as monotonically increasing
wavelength. Hereafter, n_ch = len(lbda_obs).
spec_obs : numpy 1d ndarray or list
Observed spectrum for each value of lbda_obs.
err_obs : numpy 1d/2d ndarray or list
Uncertainties on the observed spectrum. If 2d array, should be [2,n_ch]
where the first (resp. second) column corresponds to lower (upper)
uncertainty, and n_ch is the length of lbda_obs and spec_obs.
dist : float
Distance in parsec, used for flux scaling of the models.
model_grid : numpy N-d array, optional
If provided, should contain the grid of model spectra for each
free parameter of the given grid. I.e. for a grid of n_T values of Teff
and n_g values of Logg, the numpy array should be n_T x n_g x n_ch x 2,
where n_ch is the number of wavelengths for the observed spectrum,
and the last 2 dims are for wavelength and fluxes respectively.
If provided, takes precedence over model_name/model_reader.
model_reader : python routine, opt
External routine that reads a model file and returns a 2D numpy array,
where the first column corresponds to wavelengths, and the second
contains model values. See example routine in model_interpolation()
description.
em_lines: dictionary, opt
Dictionary of emission lines to be added on top of the model spectrum.
Each dict entry should be the name of the line, assigned to a tuple of
4 values:
1) the wavelength (in mu);
2) a string indicating whether line intensity is expressed in flux
('F'), luminosity ('L') or log(L/LSun) ("LogL");
3) the FWHM of the gaussian (or None if to be set automatically);
4) whether the FWHM is expressed in 'nm', 'mu' or 'km/s'.
The third and fourth can also be set to None. In that case, the FWHM of
the gaussian will automatically be set to the equivalent width of the
line, calculated from the flux to be injected and the continuum
level (measured in the grid model to which the line is injected).
Examples: em_lines = {'BrG':(2.1667,'F',None, None)};
em_lines = {'BrG':(2.1667,'LogL', 100, 'km/s')}
em_grid: dictionary pointing to lists, opt
Dictionary where each entry corresponds to an emission line and points
to a list of values to inject for emission line fluxes. For computation
efficiency, interpolation will be performed between the points of this
grid during the MCMC sampling. Dict entries should match labels and
em_lines.
dlbda_obs: numpy 1d ndarray or list, optional
Spectral channel width for the observed spectrum. It should be provided
IF one wants to weigh each point based on the spectral
resolution of the respective instruments (as in Olofsson et al. 2016).
instru_corr : numpy 2d ndarray or list, optional
Spectral correlation throughout post-processed images in which the
spectrum is measured. It is specific to the combination of instrument,
algorithm and radial separation of the companion from the central star.
Can be computed using distances.spectral_correlation(). In case of
a spectrum obtained with different instruments, build it with
distances.combine_corrs(). If not provided, it will consider the
uncertainties in each spectral channels are independent. See Greco &
Brandt (2017) for details.
instru_fwhm : float or list, optional
The instrumental spectral fwhm provided in nm. This is used to convolve
the model spectrum. If several instruments are used, provide a list of
instru_fwhm values, one for each instrument whose spectral resolution
is coarser than the model - including broad band
filter FWHM if relevant.
instru_idx: numpy 1d array, optional
1d array containing an index representing each instrument used
to obtain the spectrum, label them from 0 to n_instru. Zero for points
that don't correspond to any instru_fwhm provided above, and i in
[1,n_instru] for points associated to instru_fwhm[i-1]. This parameter
must be provided if the spectrum consists of points obtained with
different instruments.
filter_reader: python routine, optional
External routine that reads a filter file and returns a 2D numpy array,
where the first column corresponds to wavelengths, and the second
contains transmission values. Important: if not provided, but strings
are detected in instru_fwhm, the default format assumed for the files:
- first row containing header
- starting from 2nd row: 1st column: WL in mu, 2nd column: transmission
Note: files should all have the same format and wavelength units.
AV_bef_bb: bool, optional
If both extinction and an extra bb component are free parameters,
whether to apply extinction before adding the BB component (e.g.
extinction mostly from circumplanetary dust) or after the BB component
(e.g. mostly interstellar extinction).
units_obs : str, opt {'si','cgs','jy'}
Units of observed spectrum. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu
or 'jy' for janskys.
units_mod: str, opt {'si','cgs','jy'}
Units of the model. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu or 'jy'
for janskys. If different to units_obs, the spectrum units will be
converted.
interp_order: int, opt, {-1,0,1}
Interpolation mode for model interpolation.
-1: log interpolation (i.e. linear interpolation on log(Flux))
0: nearest neighbour model.
1: Order 1 spline interpolation.
Returns
-------
out: float
The log of the likelihood.
"""
if grid_param_list is not None:
if model_grid is None and model_reader is None:
msg = "model_name and model_reader must be provided"
raise TypeError(msg)
lbda_mod, spec_mod = make_model_from_params(params, labels, grid_param_list,
dist, lbda_obs, model_grid,
model_reader, em_lines, em_grid,
dlbda_obs, instru_fwhm,
instru_idx, filter_reader,
AV_bef_bb, units_obs, units_mod,
interp_order)
# evaluate the goodness of fit indicator
chi = goodness_of_fit(lbda_obs, spec_obs, err_obs, lbda_mod, spec_mod,
dlbda_obs=dlbda_obs, instru_corr=instru_corr,
instru_fwhm=instru_fwhm, instru_idx=instru_idx,
filter_reader=filter_reader, plot=False, outfile=None)
# log likelihood
lnlikelihood = -0.5 * chi
return lnlikelihood | 8411ec37268cd2c169b680b955678d13f0d10cbc | 3,656,334 |
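In equation form, and assuming goodness_of_fit returns the usual correlated chi-square built from err_obs and instru_corr (as the reference to Greco & Brandt 2017 suggests), the returned value is:

```latex
\ln\mathcal{L} = -\tfrac{1}{2}\,\chi^{2},
\qquad
\chi^{2} = \bigl(\mathbf{s}_{\mathrm{obs}}-\mathbf{s}_{\mathrm{mod}}\bigr)^{\mathsf T}\,C^{-1}\,\bigl(\mathbf{s}_{\mathrm{obs}}-\mathbf{s}_{\mathrm{mod}}\bigr)
```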
def generic_list(request):
"""Returns a list of all of the document IDs in the matched DocStore."""
return umbrella_from_request(request).get_doc_ids() | 8c5f47c8816fca503c2c4fa93db1204b3b511157 | 3,656,335 |
def japan_results(request):
"""
view function returns template that displays Japan-specific photos
"""
images = Image.filter_images_by_location(location_id=12)
return render(request, "all_pictures/japan.html", {"images":images}) | d0fd80eac7529f5b9b5699439cabb0c92f82f007 | 3,656,336 |
def add_yaml_literal_block(yaml_object):
"""
Get a yaml literal block representer function to convert normal strings into yaml literals during yaml dumping
Convert string to yaml literal block
yaml docs: see "Block mappings" in https://pyyaml.org/wiki/PyYAMLDocumentation
"""
def literal_str_representer(dumper, data):
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return yaml_object.add_representer(literal_block, literal_str_representer) | 47b4295394a67e92bcbc5d7cb4c25a0a1ca220dc | 3,656,337 |
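A minimal usage sketch, assuming the function and a trivial str subclass named literal_block live in the same module (the snippet references that name but does not define it); this is the usual PyYAML pattern for opting specific strings into block-literal style:

```python
import yaml


class literal_block(str):
    """Marker type: strings wrapped in this class are dumped as '|' literal blocks."""


add_yaml_literal_block(yaml)  # register the representer on yaml's default Dumper

doc = {"script": literal_block("#!/bin/sh\necho hello\n"), "name": "demo"}
print(yaml.dump(doc))
# name: demo
# script: |
#   #!/bin/sh
#   echo hello
```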
from typing import List
from typing import Dict
from typing import Set
from typing import Optional
def _spans_to_array(
doc: Doc,
sources: List[str],
label2idx: Dict[str, int],
labels_without_prefix: Set[str],
prefixes: Optional[Set[str]] = None,
warn_missing_labels: bool = False
) -> np.ndarray:
"""Convert the annotations of a spacy document into a 2D array.
Each row corresponds to a token, and each column to a labelling
source. In other words, the value at (i,j) represents the prediction
of source j for token i. This prediction is expressed as the
index of the label in the labels.
NB:
- Sources should be a list of labelling sources. If empty, all sources
are employed.
- If `prefixes` are provided (e.g., [I, B, L]), it is assumed that the
labels in `label2idx` contain the prefixes (e.g., I-PERSON,
B-PERSON).
- If `prefixes` are not provided, it is assumed that the labels in
`label2idx` do not contain prefixes (e.g, PERSON).
- We also assume the O label is at position 0.
"""
if sources is None:
sources = list(doc.spans.keys())
if warn_missing_labels:
missing_labels = set()
# Creating the numpy array itself
data = np.zeros((len(doc), len(sources)), dtype=np.int16)
for source_index, source in enumerate(sources):
for span in doc.spans.get(source, []):
if span.label_ not in labels_without_prefix:
if warn_missing_labels:
missing_labels.add(span.label_)
continue
if prefixes is None:
# Do not use prefix labels (e.g., use PER instead of
# B-PER, I-PER, etc.)
data[span.start:span.end, source_index] = label2idx[
span.label_
]
else:
# If the span is a single token, we can use U
if "U" in prefixes and len(span) == 1:
data[span.start, source_index] = label2idx[
"U-%s" % span.label_
]
continue
# Otherwise, we use B, I and L
if "B" in prefixes:
data[span.start, source_index] = label2idx[
"B-%s" % span.label_
]
if "I" in prefixes:
start_i = (span.start+1) if "B" in prefixes else span.start
end_i = (span.end-1) if "L" in prefixes else span.end
data[start_i:end_i, source_index] = label2idx[
"I-%s" % span.label_
]
if "L" in prefixes:
data[span.end-1, source_index] = label2idx[
"L-%s" % span.label_
]
if warn_missing_labels:
print(
"WARNING: \
Span labels were found in the dataset that were not provided \
in `labels_without_prefices`: {}".format(missing_labels)
)
return data | ce9f22726877b713eb373dbee9ebb719ef655f4a | 3,656,338 |
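A small worked example of the encoding, with a hypothetical label map that is not part of the snippet:

```python
# Hypothetical label map: O at index 0, then BILU-prefixed PERSON labels.
label2idx = {"O": 0, "B-PERSON": 1, "I-PERSON": 2, "L-PERSON": 3, "U-PERSON": 4}
# For a 5-token doc where one source spans tokens 1..3 as PERSON and
# prefixes={"B", "I", "L", "U"}, that source's column comes out as:
#   token index:  0  1  2  3  4
#   encoded row: [0, 1, 2, 3, 0]   # O, B-PERSON, I-PERSON, L-PERSON, O
```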
def d_out_dist_cooler(P_mass, rho_dist_cool, w_drift):
"""
Calculates the diameter of the tube that carries distillate out of the distillate cooler to the distillate tank.
Parameters
----------
P_mass : float
The mass flow rate of distilliat, [kg/s]
rho_dist_cool : float
The density of liquid at cooling temperature, [kg/m**3]
w_drift :float
The speed of steam at the tube, [m/s]
Returns
-------
d_out_dist_cooler : float
The diameter of the tube carrying distillate from the distillate cooler to the distillate tank, [m]
References
----------
&&&
"""
return (P_mass / (0.785 * rho_dist_cool * w_drift)) ** 0.5 | 8d6dfb85aa954ef88c821d2ee1d0bb787d409e96 | 3,656,339 |
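The 0.785 constant is pi/4; solving the continuity equation for a circular pipe of diameter d gives the relation the helper is evidently after (G mass flow in kg/s, rho density in kg/m^3, w velocity in m/s):

```latex
G = \rho\, w\,\frac{\pi d^{2}}{4}
\quad\Longrightarrow\quad
d = \sqrt{\frac{G}{0.785\,\rho\,w}}\ \ [\mathrm{m}]
```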
import socket
def is_port_in_use(port):
"""
test if a port is being used or is free to use.
:param port:
:return:
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0 | 5bbdd7b39c2380d2e07e85f483f3ea5072bb616b | 3,656,340 |
def create_variables_eagerly(getter, initial_value, **kwargs):
"""Attempts to force variable creation to be eager."""
eager_initial_value = None
if isinstance(initial_value, tf.Tensor):
if _is_eager_tensor(initial_value):
eager_initial_value = initial_value
else:
# Try to compute the static value (e.g. if the user used `tf.ones`).
eager_initial_value = tf.get_static_value(initial_value)
if eager_initial_value is not None:
# If we have an eager initial value we can create variables in eager mode.
with tf.init_scope():
return getter(initial_value=eager_initial_value, **kwargs)
else:
# Fall back to creating in whatever context we're in with user input.
return getter(initial_value=initial_value, **kwargs) | 832687547bd06aef61b8a1dca219564ef184dbb3 | 3,656,341 |
import time
def _Run(vm):
"""See base method.
Args:
vm: The vm to run the benchmark on.
Returns:
A list of sample.Sample objects.
"""
# Make changes e.g. compiler flags to spec config file.
if 'gcc' in FLAGS.runspec_config:
_OverwriteGccO3(vm)
# swap only if necessary; free local node memory and avoid remote memory;
# reset caches; set stack size to unlimited
# Also consider setting enable_transparent_hugepages flag to true
cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
'ulimit -s unlimited && ')
cmd += 'runcpu '
if FLAGS.spec17_build_only:
cmd += '--action build '
if FLAGS.spec17_rebuild:
cmd += '--rebuild '
version_specific_parameters = []
# rate runs require 2 GB minimum system main memory per copy,
# not including os overhead. Refer to:
# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
copies = min(vm.NumCpusForBenchmark(),
vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
version_specific_parameters.append(' --copies=%s ' %
(FLAGS.spec17_copies or copies))
version_specific_parameters.append(
' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark()))
if FLAGS.spec17_fdo:
version_specific_parameters.append('--feedback ')
vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')
start_time = time.time()
stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
version_specific_parameters)
if FLAGS.spec17_build_only:
if 'Error' in stdout and 'Please review this file' in stdout:
raise errors.Benchmarks.RunError('Error during SPEC compilation.')
return [
sample.Sample(
'compilation_time',
time.time() - start_time, 's', {
'spec17_subset': FLAGS.spec17_subset,
'gcc_version': build_tools.GetVersion(vm, 'gcc')
})
]
partial_results = True
# Do not allow partial results if any benchmark subset is a full suite.
for benchmark_subset in FLAGS.benchmark_subset:
if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']:
partial_results = False
log_files = set()
for test in FLAGS.spec17_subset:
if test in LOG_FILENAME:
log_files.add(LOG_FILENAME[test])
else:
if test in INTSPEED_SUITE:
log_files.add(LOG_FILENAME['intspeed'])
elif test in INTRATE_SUITE:
log_files.add(LOG_FILENAME['intrate'])
elif test in FPSPEED_SUITE:
log_files.add(LOG_FILENAME['fpspeed'])
elif test in FPRATE_SUITE:
log_files.add(LOG_FILENAME['fprate'])
for log_file in log_files:
vm.RemoteCommand(
f'cp {vm.GetScratchDir()}/cpu2017/result/{log_file} ~/{log_file}.log')
vm.PullFile(vm_util.GetTempDir(), f'~/{log_file}.log')
samples = speccpu.ParseOutput(vm, log_files, partial_results, None)
for item in samples:
item.metadata['vm_name'] = vm.name
item.metadata['spec17_gcc_flags'] = FLAGS.spec17_gcc_flags
return samples | ba7c8575c45cdd7edccdf212d8ff29adb0d0fe1b | 3,656,342 |
def mixin_method(ufunc, rhs=None, transpose=True):
"""Decorator to register a mixin class method
Using this decorator ensures that derived classes that are declared
with the `mixin_class` decorator will also have the behaviors that this
class has.
ufunc : numpy.ufunc
A universal function (or NEP18 callable) that is hooked in awkward1,
i.e. it can be the first argument of a behavior
rhs : Set[type] or None
List of right-hand side argument types (leave None if unary function)
The left-hand side is expected to always be ``self`` of the parent class
If the function is not unary or binary, call for help :)
transpose : bool
        Automatically create a transpose signature (only makes sense for binary ufuncs)
"""
def register(method):
if not isinstance(rhs, (set, type(None))):
raise ValueError("Expected a set of right-hand-side argument types")
if transpose and rhs is not None:
def transposed(left, right):
return method(right, left)
method._awkward_mixin = (ufunc, rhs, transposed)
else:
method._awkward_mixin = (ufunc, rhs, None)
return method
return register | d1130740628eb947bd786bc3393343b8c283164d | 3,656,343 |
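A hedged sketch of applying the decorator above; the "TwoVector" record name and its field access are illustrative assumptions, not part of the original library:

import numpy as np

class TwoVectorMethods:
    # Hooks np.add for pairs of "TwoVector" records; the decorator only attaches
    # metadata to the method, so defining the class is enough to run this sketch.
    @mixin_method(np.add, {"TwoVector"})
    def add(self, other):
        return {"x": self.x + other.x, "y": self.y + other.y}

print(TwoVectorMethods.add._awkward_mixin[0] is np.add)   # True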
def set_def_quick_print(setting):
"""
Set the global default (henceforth) behavior whether to quick print
when stamping or stopping.
Args:
setting: Passed through bool().
Returns:
bool: Implemented setting value.
"""
setting = bool(setting)
SET['QP'] = setting
return setting | 835028c97fb03435de65df6f13c5c05fe61710f0 | 3,656,344 |
from datetime import datetime
def time_handler(start_time, start_fmt, elaps_fmt, today):
"""return StartTime, ElapsedTime tuple using
start/sub time string"""
start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')
start_time = StartTime(start_time.year, start_time.month,
start_time.day, start_time.hour,
start_time.minute, start_time.second)
start_time.fmt = start_fmt
delta = today - start_time
delta = ElapsedTime(delta.days, delta.seconds, 0)
delta.fmt = elaps_fmt
return start_time, delta | 7f063a119947f90a24d76fd2f5ce7eba790a3df5 | 3,656,345 |
def lgb_multi_weighted_logloss_exgal(y_preds, train_data):
"""
@author olivier https://www.kaggle.com/ogrellier
https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
multi logloss for PLAsTiCC challenge
"""
# class_weights taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with Kyle Boone's post https://www.kaggle.com/kyleboone
y_true = train_data.get_label()
if len(np.unique(y_true)) > 14:
classes_exgal.append(99)
class_weight_exgal[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes_exgal), order='F')
# normalize
y_p /= y_p.sum(1)[:,None]
    # Transform y_true into dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
# Get the log for ones, .values is used to drop the index of DataFrames
# Exclude class 99 for now, since there is no class99 in the training set
# we gave a special process for that class
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weight_exgal[k] for k in sorted(class_weight_exgal.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False | 576e0263f9088341bd4d8b0e7e016de513da26ca | 3,656,346 |
def api_owner_required(f):
"""
Authorization decorator for api requests that require the record's owner
Ensure a user is admin or the actual user who created the record,
if not send a 400 error.
:return: Function
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_admin():
return f(*args, **kwargs)
else:
user_id = kwargs['user_id']
if current_user.id != user_id:
abort(400)
return f(*args, **kwargs)
return decorated_function | 4114abf4abc8afd1fd6d68388c17ed04e4029c13 | 3,656,347 |
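A sketch of how the decorator above might guard a Flask route; the blueprint, URL rule, and response shape are illustrative assumptions:

from flask import Blueprint, jsonify

records_bp = Blueprint("records", __name__)

@records_bp.route("/users/<int:user_id>/records")
@api_owner_required
def list_user_records(user_id):
    # Only reached when current_user is an admin or matches user_id.
    return jsonify(success=True, message={"records": []})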
def flatten_probas_ori(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels | 95d0df96a7ea612546616cc96b8ea78f5bd52641 | 3,656,349 |
def VisionTransformer_small(pretrained=False,input_shape=(3,224,224),patch_size=16,num_classes=1000, depth=8,drop_rate=0.2,**kwargs):
""" My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3."""
vit= VisionTransformer( patch_size=patch_size,num_classes=num_classes, depth=depth,
num_heads=12, mlp_ratio=3., qkv_bias=False, qk_scale=768 ** -0.5, representation_size=None,
drop_rate=drop_rate, attn_drop_rate=drop_rate, drop_path_rate=drop_rate, hybrid_backbone=None)
model=ImageClassificationModel(input_shape=input_shape,output=vit)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
vit.qk_scale=768 ** -0.5
return model | 0022e3371c5c9cc138a78f1927f807a91d077f3c | 3,656,351 |
def query_filter_choices(arg=None, fq=[]):
"""
Makes solr query and returns facets for tickets.
:param arg: solr query, string
"""
params = {
'short_timeout': True,
'fq': [
'project_id_s:%s' % c.project._id,
'mount_point_s:%s' % c.app.config.options.mount_point,
'type_s:Ticket',
] + fq,
'rows': 0,
}
params.update(FACET_PARAMS)
result = search(arg, **params)
return get_facets(result) | 226dd808a42981b6183c4425231107d0e7197b2b | 3,656,354 |
def has_no_duplicates(input_):
"""Check that a list contains no duplicates.
For example:
['aa', 'bb', 'cc'] is valid.
['aa', 'bb', 'aa'] is not valid. The word aa appears more than once.
"""
return len(input_) == len(set(input_)) | 6bc1b29b3509e4b17523408ea362591cace8d05d | 3,656,355 |
def uplab_to_renotation_specification(spec, lab):
"""Convert a color in the normalized UP LAB space to its equivalent Munsell color.
Parameters
----------
lab : np.ndarray of shape (3,) and dtype float
The `l', `a-star` and `b-star` values for the color, with `l` in the domain [0, 1],
and `a-star` and `b-star` each in the domain [-0.5, 0.5].
Returns
-------
np.ndarray of shape (4,) and dtype float
A Colorlab-compatible Munsell specification (`hue_shade`, `value`, `chroma`, `hue_index`),
with `hue_shade` one of [0, 2.5, 5, 7.5], `value` one of [0, 1, 2, ..., 10],
`chroma` one of [0, 2, 4, ..., 50] and `hue_index` one of [1, 2, 3, ..., 10].
Notes
-----
Measures the distance in the UP LAB a-b color plane at the given `l` (luminosity) value
between the given `a*` and `b*` values and those of 4 bracketing `a*` and `b*` value
pairs from the Munsell renotation (`hue_shade` of 2.5, 5, 7.5 and 10, and `chroma` one
of [0, 2, 4, ..., 50]). Selects the one with the closest cartesian distance to the
given target.
"""
hue_shade, value, chroma, hue_index = spec
v_ren = value
if v_ren < 1:
v_ren = 1
elif v_ren > 9 and v_ren < 9.9:
v_ren = 9
v_ren = round(v_ren)
if np.isnan(hue_shade):
# Grays
spec[1] = v_ren
return spec
# Colors
c0, _ = divmod(chroma, 2)
c0 = c0 * 2
c1 = c0 + 2
h0, _ = divmod(hue_shade, 2.5)
h0 = h0 * 2.5
h1 = h0 + 2.5
l, a_star, b_star = lab
closest_dist = None
closest = None
for ct in [c0, c1]:
for ht in [h0, h1]:
test_spec = munsellkit.normalized_color(
np.array([ht, value, ct, hue_index]),
rounding='renotation', out='spec')
lt, at, bt = munsell_specification_to_uplab(test_spec)
distance_sq = (at - a_star) * (at - a_star) + (bt - b_star) * (bt - b_star)
# print(f'test {test_spec}: distance is {distance_sq}')
if closest_dist is None or closest_dist > distance_sq:
closest_dist = distance_sq
closest = test_spec
closest[1] = v_ren
return closest | 7354ee53f9067f4720c438d1a8f743ca0b441c51 | 3,656,356 |
def _getBestSize(value):
"""
    Given a size in bytes, convert it into a nice, human-readable value
with units.
"""
if value >= 1024.0**4:
value = value / 1024.0**4
unit = 'TB'
elif value >= 1024.0**3:
value = value / 1024.0**3
unit = 'GB'
elif value >= 1024.0**2:
value = value / 1024.0**2
unit = 'MB'
elif value >= 1024.0:
value = value / 1024.0
unit = 'kB'
else:
unit = 'B'
return value, unit | 6c1859c50edcbd5715443fbf30775eeee83d6a0c | 3,656,358 |
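A small usage sketch for the formatter above; the byte counts are arbitrary:

for raw_bytes in (512, 2048, 3_500_000_000):
    value, unit = _getBestSize(raw_bytes)
    print(f"{value:.2f} {unit}")   # 512.00 B, 2.00 kB, 3.26 GB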
def create_line_segments(df, x="lon", y="lat", epsg=4269):
"""Creates a GeodataFrame of line segments from the
shapes dataframe (CRS is NAD83)
Params:
df (DataFrame): pandas DataFrame
x, y (str, optional) Default values x="lon", y="lat",
column names for x and y coordinates
epsg (int): Default value epsg=4269; EPSG value for x,y coordinate system
Returns:
gdf: (GeoDataFrame) Line GeoDataFrame in passed Coordinate System
"""
if df[x].isna().sum() > 0 or df[y].isna().sum() > 0:
raise f"DataFrame contains Null coordinates; consider removing rows with Null {x,y} values"
points = [Point(xy) for xy in zip(df[x], df[y])]
gdf = gpd.GeoDataFrame(df.copy(), geometry=points)
line_segments = (
gdf.groupby(["shape_id"])["geometry"]
.apply(lambda x: LineString(x.tolist()))
.reset_index()
)
gdf_out = gpd.GeoDataFrame(line_segments, geometry="geometry", crs=from_epsg(epsg))
return gdf_out | a0b23c165dc808cc2793f3a62ce002dbf5990562 | 3,656,361 |
def population_correlation(data_matrix, x_index, y_index):
"""
data_matrix is a numpy multi-dimensional array (matrix)
x_index and y_index are the index for the first and second variables respectively
it returns the correlation between two variables in a data_matrix
"""
transposed_data = data_matrix.transpose()
x_population = transposed_data[x_index]
x_mean = np.mean(x_population)
x_std = np.std(x_population)
y_population = transposed_data[y_index]
y_mean = np.mean(y_population)
y_std = np.std(y_population)
# To calculate the expectation means to calculate the cov(x_population, y_population)
# This can also be done using numpy. For that use: np.cov(x_population, y_population, bias=True)
# bias=True indicates that we are calculating the population covariance
# np.cov returns a bxb matrix, where b is the amount of vectors passed as parameter, in our case b=2
expectation = np.mean((x_population - x_mean) * (y_population - y_mean))
std_product = x_std * y_std
return expectation/std_product | 5216a617b5afba9c784fa18cad8506fd57f64e61 | 3,656,362 |
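A quick sanity check for the function above; the sample matrix is made up, and the result should match np.corrcoef because the population/sample scaling cancels out of a correlation:

import numpy as np

data = np.array([[1.0, 2.0],
                 [2.0, 4.1],
                 [3.0, 6.2],
                 [4.0, 7.9]])
print(population_correlation(data, 0, 1))          # ~0.999
print(np.corrcoef(data[:, 0], data[:, 1])[0, 1])   # same value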
from typing import Any
def upload(workspace: str, table: str) -> Any:
"""
Store a nested_json tree into the database in coordinated node and edge tables.
`workspace` - the target workspace.
`table` - the target table.
`data` - the nested_json data, passed in the request body.
"""
# Set up the parameters.
data = request.data.decode("utf8")
space = db.db(workspace)
edgetable_name = f"{table}_edges"
int_nodetable_name = f"{table}_internal_nodes"
leaf_nodetable_name = f"{table}_leaf_nodes"
# Set up the database targets.
if space.has_collection(edgetable_name):
edgetable = space.collection(edgetable_name)
else:
edgetable = space.create_collection(edgetable_name, edge=True)
if space.has_collection(int_nodetable_name):
int_nodetable = space.collection(int_nodetable_name)
else:
int_nodetable = space.create_collection(int_nodetable_name)
if space.has_collection(leaf_nodetable_name):
leaf_nodetable = space.collection(leaf_nodetable_name)
else:
leaf_nodetable = space.create_collection(leaf_nodetable_name)
# Analyze the nested_json data into a node and edge table.
(nodes, edges) = analyze_nested_json(data, int_nodetable_name, leaf_nodetable_name)
# Upload the data to the database.
edgetable.insert_many(edges)
int_nodetable.insert_many(nodes[0])
leaf_nodetable.insert_many(nodes[1])
return dict(
edgecount=len(edges), int_nodecount=len(nodes[0]), leaf_nodecount=len(nodes[1])
) | 7fb4d0c4c31f499944b263f1f1fedcff34667ea1 | 3,656,363 |
def validate_google_login(email):
"""
Validate a login completed via Google, returning the user id on success.
An ``ODPIdentityError`` is raised if the login cannot be permitted for any reason.
:param email: the Google email address
:raises ODPUserNotFound: if there is no user account for the given email address
:raises ODPAccountLocked: if the user account has been temporarily locked
:raises ODPAccountDisabled: if the user account has been deactivated
:raises ODPEmailNotVerified: if the email address has not been verified
"""
user = get_user_by_email(email)
if not user:
raise x.ODPUserNotFound
if is_account_locked(user.id):
raise x.ODPAccountLocked
if not user.active:
raise x.ODPAccountDisabled
return user.id | 36e095f58600b6b8a799c459ad6181afafcbcf93 | 3,656,364 |
def add_months(start_date, months, date_format=DATE_FORMAT):
"""
    Return a date string with the desired number of calendar months added.
    The day of month is clamped to the end of the target month, e.g. 31/1/2020 + 1 month = 29/2/2020.
"""
new_date = start_date + relativedelta(months=+months)
return new_date.strftime(date_format) | 7f579dd33807f30fa83a95b554882d6f8bf18626 | 3,656,365 |
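An illustrative call of the helper above; the explicit format string stands in for the module-level DATE_FORMAT default, and dateutil's relativedelta import from the enclosing module is assumed:

from datetime import date

# 31 Jan 2020 plus one month clamps to the last day of February (leap year).
print(add_months(date(2020, 1, 31), 1, date_format="%d/%m/%Y"))   # 29/02/2020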
def inVolts(mv):
""" Converts millivolts to volts... you know, to keep the API
consistent. """
return mv/1000.0 | 6c92195996be1aa2bd52aa0a95d247f7fdef5955 | 3,656,366 |
from typing import Mapping
from typing import Any
from typing import Tuple
import uuid
def extract_hit(
hit: Mapping[str, Any],
includes: Tuple[str] = (ID_FIELD,),
source: str = '_source'
) -> Mapping[str, Any]:
"""
Extract a document from a single search result hit.
:param hit: the search hit document
:param includes: the metadata keys to include in the return document
:param source: the key that contains the source document
:return:
"""
doc = {
**{
k: hit.get(k) for k in includes
},
**hit.get(source)
}
# If the document ID is included...
if ID_FIELD in doc:
# ...convert it to a UUID.
doc[ID_FIELD] = uuid.UUID(doc.get(ID_FIELD))
return doc | d67f68618bbfe0e86c1525845cf4af69be31a8df | 3,656,367 |
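A hedged example for the extractor above; the hit dictionary is a made-up Elasticsearch-style result and it assumes the module-level ID_FIELD constant is the "_id" key:

hit = {
    "_id": "7c9e6679-7425-40de-944b-e07fc1f90ae7",
    "_score": 1.3,
    "_source": {"title": "hello", "body": "world"},
}
doc = extract_hit(hit, includes=("_id",))
# doc now holds a uuid.UUID under "_id" plus the flattened _source fields.
print(doc["_id"], doc["title"], doc["body"])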
import time
import random
def generateUserIDToken(id):
"""Generates a unique user id token."""
t = int(time.time() * 1000)
r = int(random.random() * 100000000000000000)
data = "%s %s %s %s" % (ip, t, r, id)
return md5(data.encode('utf-8')).hexdigest() | bc523855df0b911868d802352c83bb99d4768cf3 | 3,656,368 |
from pytools import generate_nonnegative_integer_tuples_summing_to_at_most as gnitstam
def grad_simplex_monomial_basis(dims, n):
"""Return the gradients of the functions returned by
:func:`simplex_monomial_basis`.
:returns: a :class:`tuple` of functions, each of which
accepts arrays of shape *(dims, npts)*
and returns a :class:`tuple` of length *dims* containing
the derivatives along each axis as an array of size *npts*.
'Scalar' evaluation, by passing just one vector of length *dims*,
is also supported.
.. versionadded:: 2016.1
"""
warn("grad_simplex_monomial_basis_with_mode_ids is deprecated. "
"Use monomial_basis_for_space instead. "
"This function will go away in 2022.",
DeprecationWarning, stacklevel=2)
return tuple(partial(grad_monomial, order) for order in gnitstam(n, dims)) | 6be49780a984b9a8fb1b54073d845da683d06e36 | 3,656,369 |
from typing import Collection
def get_collection() -> Collection:
"""Коллекция для хранения моделей."""
return _COLLECTION | e82f93a14e1a6640fe9b7b02b062540073060acf | 3,656,370 |
def ask_for_flasherhwver():
"""
Ask for the flasher version, either 1 or 2 right now...
"""
#if FLASHER_SKIP_ON_VALID_DETECTION and FLASHER_VERSION != 1:
# return FLASHER_VERSION
FLASHER_VERSION = 1
flash_version = FLASHER_VERSION
if FLASHER_VERSION is None:
while True:
try:
                flash_version = int(input("--- Enter version of programmer hardware [Available Versions: Programmer V1 or Programmer V2]: "))
            except (ValueError, TypeError):
                pass
if flash_version == 1 or flash_version == 2:
break
print("<<< USER REPORTED HARDWARE FLASHER REVISION AS VERSION", flash_version, ">>>")
return flash_version | 6d9cf88ce3fd6850e345431b85ee0ed1bcaffb84 | 3,656,371 |
import torch
def real_to_complex_channels(x, separate_real_imag=False):
""" Inverse of complex_as_real_channels: C*2 real channels (or 2*C if separate_real_imag) to C complex channels. """
if separate_real_imag:
channel_shape = (2, -1)
permute = (0, 2, 3, 4, 1)
else:
channel_shape = (-1, 2)
permute = (0, 1, 3, 4, 2)
return torch.view_as_complex(channel_reshape(x, channel_shape).permute(*permute).contiguous()) | 76692fb1597ae30d99672361e9a6db74f9d1dd86 | 3,656,372 |
def create_coffee_machine() -> CoffeeMachine:
"""Create CoffeeMachine object for testing"""
_coffee_machine = CoffeeMachine()
_coffee_machine.refill_water()
_coffee_machine.refill_milk()
_coffee_machine.refill_coffee_beans()
return _coffee_machine | 10c203249c681d8f13058227521532aefaeda478 | 3,656,373 |
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value) | 6c850f7f72d61ef0b0b04363a31d2563bf316d33 | 3,656,374 |
def detect(iring, mode, axis=None, *args, **kwargs):
"""Apply square-law detection to create polarization products.
Args:
iring (Ring or Block): Input data source.
mode (string):
``'scalar': x -> real x.x*``
``'jones': x,y -> complex x.x* + 1j*y.y*, x.y*``
``'stokes': x,y -> real I, Q, U, V``
axis: Integer or string specifying the polarization axis. Defaults to
'pol'. Not used if mode = 'scalar'.
*args: Arguments to ``bifrost.pipeline.TransformBlock``.
**kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.
**Tensor semantics**::
Input: [..., 'pol', ...], dtype = any complex, space = CUDA
Output: [..., 'pol', ...], dtype = real or complex, space = CUDA
Returns:
DetectBlock: A new block instance.
"""
return DetectBlock(iring, mode, axis, *args, **kwargs) | 42690bf325e0d21f5839ac5f87e3d0be7ca42029 | 3,656,375 |
import watchdog
def is_watchdog_supported():
""" Return ``True`` if watchdog is available."""
try:
except ImportError:
return False
return True | 8c777b9a6b29876d902087f2b719519771b5fc2a | 3,656,377 |
def set_bit(arg1, x, bit, y):
"""
set_bit(Int_ctx arg1, Int_net x, unsigned int bit, Int_net y) -> Int_net
Parameters
----------
arg1: Int_ctx
x: Int_net
bit: unsigned int
y: Int_net
"""
return _api.set_bit(arg1, x, bit, y) | c5e7062a9e7f8f46bb4935905b9a485487c0bfad | 3,656,378 |
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).time_formats[format] | d774c14a27b263f4a9cadfd4d144cd9bd0ce1fd3 | 3,656,379 |
def resource_type_service(resource_type):
"""Gets the service name from a resource type.
:exc:`ValueError` is raised if the resource type is invalid, see
:func:`parse_resource_type`.
>>> resource_type_service('AWS::ECS::Instance')
'ECS'
"""
return parse_resource_type(resource_type)[1] | 04ff4ffa22e742dbd63a41cb8f9eec79628938f2 | 3,656,380 |
def loads(ss):
""" loads(ss)
Load a struct from the given string.
Parameters
----------
ss : (Unicode) string
A serialized struct (obtained using ssdf.saves()).
"""
# Check
if not isinstance(ss, basestring):
raise ValueError('ssdf.loads() expects a string.')
# Read
reader = _SSDFReader()
return reader.text_to_struct(ss) | ee07c433f9453b5a9f444cbcda2b80217243f0f0 | 3,656,381 |
from typing import IO
import mimetypes
def guess_mime_type(file_object: IO) -> str:
"""Guess mime type from file extension."""
mime_type, _encoding = mimetypes.guess_type(file_object.name)
if not mime_type:
mime_type = "application/octet-stream"
return mime_type | 12e6e6667b08eaaa24b822c37d56055c1487a801 | 3,656,382 |
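A short, self-contained usage sketch for the guesser above; only the file object's name attribute (its extension) drives the result:

import io

fake_pdf = io.BytesIO(b"%PDF-1.4")
fake_pdf.name = "report.pdf"           # hypothetical file name; the bytes are irrelevant
print(guess_mime_type(fake_pdf))       # application/pdf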
def postcount_test(metadict_friends):
"""Среднее число постов по выборке, чтобы выделить активных/неактивных неймфагов."""
all_postcount = 0
for namefag in metadict_friends.keys():
name_number = namefag[0]
name_postcount = cursor.execute("SELECT postcount FROM namefags WHERE number=?"\
,(name_number,)).fetchall()
all_postcount = all_postcount + int(name_postcount[0][0])
name_number = len(metadict_friends)
medial_postcount = all_postcount / name_number
return medial_postcount,all_postcount | dd9f717f8c1c81e6805a257e28e74124f156661f | 3,656,383 |
def extract_stack_name(fields):
"""_extract_stack_name(self, fields: list[str]) -> str
Extract a stack name from the fields
Examples:
ffffffff818244f2 [unknown] ([kernel.kallsyms]) -> [kernel.kallsyms]
        1094d __GI___libc_recvmsg (/lib/x86_64-linux-gnu/libpthread-2.23.so) -> __GI___libc_recvmsg
"""
if fields[1] == '[unknown]':
return to_module_name(fields[2][1:-1])
return fields[1] | 093a2397da50bac3ce299256a6d9640af33bf59f | 3,656,384 |
def valid_shape(shape):
"""
@returns: True if given shape is a valid tetris shape
"""
return shape in SHAPES and len(shape) == 1 | e40fda46078a615b6d93438ea9d9e9d72800b25a | 3,656,387 |
def get_device(device_id):
"""
@api {get} /devices/:device_id Get Unique Device
@apiVersion 1.0.0
@apiName GetDevice
@apiGroup Device
@apiSuccess {Boolean} success Request status
@apiSuccess {Object} message Respond payload
@apiSuccess {Object} message.device Device object
"""
device_obj = Device.query.get(device_id)
if not device_obj:
return jsonify(success=False, message='not found'), 404
return jsonify(success=True, message={'device': device_obj.to_dict()}) | 0bad727a1a554d63db774179f45deacb1164ba18 | 3,656,388 |
import gzip
def read_imgs(filename, num_images):
"""读入图片数据
:param filename:
:param num_images:
:return:
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
28 * 28 * num_images * 1)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, 28, 28, 1)
return data | a97602470c729211214b4d4a7acd0744beecdfae | 3,656,389 |
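A hedged usage note for the reader above; the file name is the conventional gzipped MNIST training archive and is assumed to be present locally:

train_images = read_imgs("train-images-idx3-ubyte.gz", 60000)
print(train_images.shape)   # (60000, 28, 28, 1)
print(train_images.dtype)   # uint8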
def all_faces(coord, connect):
""" Gets vertices of all faces of the mesh.
Args:
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
Returns:
Corresponding nodes.
"""
nodes_per_face = np.array([connect[:, [1,2,3,4]], connect[:, [5,6,7,8]], \
connect[:, [6,7,3,2]], connect[:, [7,8,4,3]], \
connect[:, [6,5,1,2]], connect[:, [5,8,4,1]]]).reshape(-1,4)
ind_faces = npi.indices(coord[:,0], nodes_per_face.flatten()).reshape(-1, 4)
return ind_faces | 9955260eae11bd6a32e76fb96468989922e856dc | 3,656,390 |
def edit_assignment(request_ctx, course_id, id, assignment_name=None, assignment_position=None, assignment_submission_types=None, assignment_allowed_extensions=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_peer_reviews=None, assignment_automatic_peer_reviews=None, assignment_notify_of_update=None, assignment_group_category_id=None, assignment_grade_group_students_individually=None, assignment_external_tool_tag_attributes=None, assignment_points_possible=None, assignment_grading_type=None, assignment_due_at=None, assignment_lock_at=None, assignment_unlock_at=None, assignment_description=None, assignment_assignment_group_id=None, assignment_muted=None, assignment_assignment_overrides=None, assignment_only_visible_to_overrides=None, assignment_published=None, assignment_grading_standard_id=None, **request_kwargs):
"""
Modify an existing assignment.
If the assignment[assignment_overrides] key is absent, any existing
overrides are kept as is. If the assignment[assignment_overrides] key is
present, existing overrides are updated or deleted (and new ones created,
as necessary) to match the provided list.
NOTE: The assignment overrides feature is in beta.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:param assignment_name: (optional) The assignment name.
:type assignment_name: string or None
:param assignment_position: (optional) The position of this assignment in the group when displaying assignment lists.
:type assignment_position: integer or None
:param assignment_submission_types: (optional) List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)
:type assignment_submission_types: string or None
:param assignment_allowed_extensions: (optional) Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]
:type assignment_allowed_extensions: string or None
:param assignment_turnitin_enabled: (optional) Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.
:type assignment_turnitin_enabled: boolean or None
:param assignment_turnitin_settings: (optional) Settings to send along to turnitin. See Assignment object definition for format.
:type assignment_turnitin_settings: string or None
:param assignment_peer_reviews: (optional) If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.
:type assignment_peer_reviews: boolean or None
:param assignment_automatic_peer_reviews: (optional) Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.
:type assignment_automatic_peer_reviews: boolean or None
:param assignment_notify_of_update: (optional) If true, Canvas will send a notification to students in the class notifying them that the content has changed.
:type assignment_notify_of_update: boolean or None
:param assignment_group_category_id: (optional) If present, the assignment will become a group assignment assigned to the group.
:type assignment_group_category_id: integer or None
:param assignment_grade_group_students_individually: (optional) If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. If true, the teacher can manually assign scores to each member of the group.
:type assignment_grade_group_students_individually: integer or None
:param assignment_external_tool_tag_attributes: (optional) Hash of attributes if submission_types is ["external_tool"] Example: external_tool_tag_attributes: { // url to the external tool url: "http://instructure.com", // create a new tab for the module, defaults to false. new_tab: false }
:type assignment_external_tool_tag_attributes: string or None
:param assignment_points_possible: (optional) The maximum points possible on the assignment.
:type assignment_points_possible: float or None
:param assignment_grading_type: (optional) The strategy used for grading the assignment. The assignment is ungraded if this field is omitted.
:type assignment_grading_type: string or None
:param assignment_due_at: (optional) The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_due_at: timestamp or None
:param assignment_lock_at: (optional) The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_lock_at: timestamp or None
:param assignment_unlock_at: (optional) The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_unlock_at: timestamp or None
:param assignment_description: (optional) The assignment's description, supports HTML.
:type assignment_description: string or None
:param assignment_assignment_group_id: (optional) The assignment group id to put the assignment in. Defaults to the top assignment group in the course.
:type assignment_assignment_group_id: integer or None
:param assignment_muted: (optional) Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.
:type assignment_muted: boolean or None
:param assignment_assignment_overrides: (optional) List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.
:type assignment_assignment_overrides: assignmentoverride or None
:param assignment_only_visible_to_overrides: (optional) Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)
:type assignment_only_visible_to_overrides: boolean or None
:param assignment_published: (optional) Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.
:type assignment_published: boolean or None
:param assignment_grading_standard_id: (optional) The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.
:type assignment_grading_standard_id: integer or None
:return: Edit an assignment
:rtype: requests.Response (with Assignment data)
"""
assignment_submission_types_types = ('online_quiz', 'none', 'on_paper', 'online_quiz', 'discussion_topic', 'external_tool', 'online_upload', 'online_text_entry', 'online_url', 'media_recording')
assignment_grading_type_types = ('pass_fail', 'percent', 'letter_grade', 'gpa_scale', 'points')
utils.validate_attr_is_acceptable(assignment_submission_types, assignment_submission_types_types)
utils.validate_attr_is_acceptable(assignment_grading_type, assignment_grading_type_types)
path = '/v1/courses/{course_id}/assignments/{id}'
payload = {
'assignment[name]' : assignment_name,
'assignment[position]' : assignment_position,
'assignment[submission_types][]' : assignment_submission_types,
'assignment[allowed_extensions]' : assignment_allowed_extensions,
'assignment[turnitin_enabled]' : assignment_turnitin_enabled,
'assignment[turnitin_settings]' : assignment_turnitin_settings,
'assignment[peer_reviews]' : assignment_peer_reviews,
'assignment[automatic_peer_reviews]' : assignment_automatic_peer_reviews,
'assignment[notify_of_update]' : assignment_notify_of_update,
'assignment[group_category_id]' : assignment_group_category_id,
'assignment[grade_group_students_individually]' : assignment_grade_group_students_individually,
'assignment[points_possible]' : assignment_points_possible,
'assignment[grading_type]' : assignment_grading_type,
'assignment[due_at]' : assignment_due_at,
'assignment[lock_at]' : assignment_lock_at,
'assignment[unlock_at]' : assignment_unlock_at,
'assignment[description]' : assignment_description,
'assignment[assignment_group_id]' : assignment_assignment_group_id,
'assignment[muted]' : assignment_muted,
'assignment[assignment_overrides]' : assignment_assignment_overrides,
'assignment[only_visible_to_overrides]' : assignment_only_visible_to_overrides,
'assignment[published]' : assignment_published,
'assignment[grading_standard_id]' : assignment_grading_standard_id,
}
for attribute, value in list((assignment_external_tool_tag_attributes or {}).items()):
payload['assignment[external_tool_tag_attributes][{}]'.format(attribute)] = value
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response | d83094e9d3e9ab66f5c6d4f5245dde80cf4579cc | 3,656,392 |