| content | sha1 | id |
|---|---|---|
| string (length 35 to 762k) | string (length 40) | int64 (0 to 3.66M) |
import logging
def loadData(data_source, loc, run, indexes, ntry=0, __text__=None, __prog__=None):
"""
Loads the data from a remote source. Has hooks for progress bars.
"""
if __text__ is not None:
__text__.emit("Decoding File")
if data_source.getName() == "Local WRF-ARW":
url = data_source.getURLList(outlet="Local")[0].replace("file://", "")
decoder = ARWDecoder
dec = decoder((url, loc[0], loc[1]))
else:
decoder, url = data_source.getDecoderAndURL(loc, run, outlet_num=ntry)
logging.info("Using decoder: " + str(decoder))
logging.info("Data URL: " + url)
dec = decoder(url)
if __text__ is not None:
__text__.emit("Creating Profiles")
profs = dec.getProfiles(indexes=indexes)
return profs | d06fd6fb6194dac63911a5b9c3ad267525098cd2 | 3,718 |
from numpy import cosh, cos, sinh, sin
def comp_psip_skin(self, u):
    """psip_skin for skin effect computation
    Parameters
    ----------
    self : Conductor
        A Conductor object
    u : float or ndarray
    Returns
    -------
    y : float or ndarray
        skin effect coefficient psip evaluated at u
    """
y = (1 / u) * (sinh(u) + sin(u)) / (cosh(u) + cos(u)) # p257 Pyrhonen
# y[u==0]=1
return y | 23fe18a13b56f38c49fd3ca14b557983064c65b3 | 3,719 |
import random
def homepage(var=None):
    """
    The function returns the homepage html template.
    The random value is drawn inside the body because a default argument
    would be evaluated only once, at import time.
    """
    if var is None:
        var = random.randint(0, 1000)
    return render_template("index.html", var=var) | 17e9033b8abeaa990cd31008861e5c412e35d1d7 | 3,720 |
import numbers
def tier(value):
"""
A special function of ordinals which does not
correspond to any mathematically useful function.
Maps ordinals to small objects, effectively compressing the range.
Used to speed up comparisons when the operands are very different sizes.
    In the current version, this is a map from ordinals to 2-tuples of integers;
    however, this is subject to change at any time, so please do not retain
    long-lived records of what tier an ordinal number is.
"""
if isinstance(value, numbers.Real):
value = ordinal(value)
if isinstance(value, ordinal):
return value._tier
raise ValueError('Value is not of a known type representing a mathematical ordinal.') | 851b36cf22c09d8f94168a0aa55292754451e351 | 3,721 |
from .models import Sequence
def get_next_value(
sequence_name="default",
initial_value=1,
reset_value=None,
*,
nowait=False,
using=None,
    override=None,
):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
db_table = connection.ops.quote_name(Sequence._meta.db_table)
if (
connection.vendor == "postgresql"
# Remove when dropping Django 2.2. Django 3.0 requires PostgreSQL 9.5.
and getattr(connection, "pg_version", 0) >= 90500
and reset_value is None
and not nowait
):
# PostgreSQL ≥ 9.5 supports "upsert".
        # This is about 3x faster than the naive implementation.
with connection.cursor() as cursor:
cursor.execute(
POSTGRESQL_UPSERT.format(db_table=db_table),
[sequence_name, initial_value],
            )
result = cursor.fetchone()
return result[0]
elif connection.vendor == "mysql" and reset_value is None and not nowait:
# MySQL supports "upsert" but not "returning".
        # This is about 2x faster than the naive implementation.
with transaction.atomic(using=using, savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
MYSQL_UPSERT.format(db_table=db_table),
[sequence_name, initial_value],
)
cursor.execute(
SELECT.format(db_table=db_table),
[sequence_name],
)
result = cursor.fetchone()
return result[0]
else:
# Default, ORM-based implementation for all other cases.
with transaction.atomic(using=using, savepoint=False):
sequences = Sequence.objects.select_for_update(nowait=nowait)
sequence, created = sequences.get_or_create(
name=sequence_name,
defaults={"last": initial_value},
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
                if override is not None:
                    sequence.last = override
sequence.save()
return sequence.last | 0770c8d4a4bea732bacfe6b7eaa404546bb79699 | 3,722 |
import numpy as np
def expected_shd(posterior, ground_truth):
"""Compute the Expected Structural Hamming Distance.
This function computes the Expected SHD between a posterior approximation
given as a collection of samples from the posterior, and the ground-truth
graph used in the original data generation process.
Parameters
----------
posterior : np.ndarray instance
Posterior approximation. The array must have size `(B, N, N)`, where `B`
is the number of sample graphs from the posterior approximation, and `N`
is the number of variables in the graphs.
ground_truth : np.ndarray instance
Adjacency matrix of the ground-truth graph. The array must have size
`(N, N)`, where `N` is the number of variables in the graph.
Returns
-------
e_shd : float
The Expected SHD.
"""
# Compute the pairwise differences
diff = np.abs(posterior - np.expand_dims(ground_truth, axis=0))
diff = diff + diff.transpose((0, 2, 1))
# Ignore double edges
diff = np.minimum(diff, 1)
shds = np.sum(diff, axis=(1, 2)) / 2
return np.mean(shds) | 5e0daf39a13fc0a4cb7a4f5d0a9fe692fdae82db | 3,723 |
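# Added usage sketch (illustrative only): random 0/1 adjacency matrices are used just to
# show the shapes expected_shd expects; with identical inputs the Expected SHD is 0.
import numpy as np

rng = np.random.default_rng(0)
B, N = 16, 5
posterior = rng.integers(0, 2, size=(B, N, N))    # B sampled adjacency matrices
ground_truth = rng.integers(0, 2, size=(N, N))    # one ground-truth adjacency matrix
print(expected_shd(posterior, ground_truth))                  # scalar float
print(expected_shd(ground_truth[None, :, :], ground_truth))   # 0.0 for identical graphs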
import json
def package_list_read(pkgpath):
"""Read package list"""
try:
with open(PACKAGE_LIST_FILE, 'r') as pkglistfile:
return json.loads(pkglistfile.read())
except Exception:
return [] | afb97afd20823563ecfda3b5c908f7ad70322868 | 3,724 |
import pandas
import types
def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.le` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method le().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_le_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data <= other._data)
return hpat_pandas_series_le_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_le_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data <= other)
return hpat_pandas_series_le_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other)) | f2969e17dd79b71a033e3c84ffd82e3bf2448554 | 3,725 |
def map_view(request):
"""
Place to show off the new map view
"""
# Define view options
view_options = MVView(
projection='EPSG:4326',
center=[-100, 40],
zoom=3.5,
maxZoom=18,
minZoom=2
)
# Define drawing options
drawing_options = MVDraw(
controls=['Modify', 'Delete', 'Move', 'Point', 'LineString', 'Polygon', 'Box'],
initial='Point',
output_format='GeoJSON'
)
# Define GeoJSON layer
geojson_object = {
'type': 'FeatureCollection',
'crs': {
'type': 'name',
'properties': {
'name': 'EPSG:3857'
}
},
'features': [
{
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [0, 0]
}
},
{
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [[4e6, -2e6], [8e6, 2e6]]
}
},
{
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [[[-5e6, -1e6], [-4e6, 1e6], [-3e6, -1e6]]]
}
}
]
}
# Define layers
map_layers = []
geojson_layer = MVLayer(source='GeoJSON',
options=geojson_object,
editable=False,
legend_title='Test GeoJSON',
legend_extent=[-46.7, -48.5, 74, 59],
legend_classes=[
MVLegendClass('polygon', 'Polygons', fill='rgba(255,255,255,0.8)', stroke='#3d9dcd'),
MVLegendClass('line', 'Lines', stroke='#3d9dcd')
])
map_layers.append(geojson_layer)
if get_geoserver_wms():
# Define GeoServer Layer
geoserver_layer = MVLayer(source='ImageWMS',
options={'url': get_geoserver_wms(),
'params': {'LAYERS': 'topp:states'},
'serverType': 'geoserver'},
legend_title='USA Population',
legend_extent=[-126, 24.5, -66.2, 49],
legend_classes=[
MVLegendClass('polygon', 'Low Density', fill='#00ff00', stroke='#000000'),
MVLegendClass('polygon', 'Medium Density', fill='#ff0000', stroke='#000000'),
MVLegendClass('polygon', 'High Density', fill='#0000ff', stroke='#000000')
])
map_layers.append(geoserver_layer)
# Define KML Layer
kml_layer = MVLayer(source='KML',
options={'url': '/static/tethys_gizmos/data/model.kml'},
legend_title='Park City Watershed',
legend_extent=[-111.60, 40.57, -111.43, 40.70],
legend_classes=[
MVLegendClass('polygon', 'Watershed Boundary', fill='#ff8000'),
MVLegendClass('line', 'Stream Network', stroke='#0000ff'),
])
map_layers.append(kml_layer)
# Tiled ArcGIS REST Layer
arc_gis_layer = MVLayer(source='TileArcGISRest',
options={'url': 'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/' +
'Specialty/ESRI_StateCityHighway_USA/MapServer'},
legend_title='ESRI USA Highway',
legend_extent=[-173, 17, -65, 72])
map_layers.append(arc_gis_layer)
# Define map view options
map_view_options = MapView(
height='600px',
width='100%',
controls=['ZoomSlider', 'Rotate', 'FullScreen',
{'MousePosition': {'projection': 'EPSG:4326'}},
{'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}],
layers=map_layers,
view=view_options,
basemap='OpenStreetMap',
draw=drawing_options,
legend=True
)
submitted_geometry = request.POST.get('geometry', None)
if submitted_geometry is not None:
messages.info(request, submitted_geometry)
context = {'map_view': map_view_options}
return render(request, 'tethys_gizmos/gizmo_showcase/map_view.html', context) | 5d037262b2c93c538b5a5b6fe076ee04a9d9b5ee | 3,727 |
def decompose_jamo(compound):
"""Return a tuple of jamo character constituents of a compound.
Note: Non-compound characters are echoed back.
WARNING: Archaic jamo compounds will raise NotImplementedError.
"""
if len(compound) != 1:
raise TypeError("decompose_jamo() expects a single character,",
"but received", type(compound), "length",
len(compound))
if compound not in JAMO_COMPOUNDS:
# Strict version:
# raise TypeError("decompose_jamo() expects a compound jamo,",
# "but received", compound)
return compound
return _JAMO_TO_COMPONENTS.get(compound, compound) | 56eb503b47a966d7f88750f7fdc1bcc55ba1aa1b | 3,728 |
from typing import Optional
def cp_in_drive(
source_id: str,
dest_title: Optional[str] = None,
parent_dir_id: Optional[str] = None,
) -> DiyGDriveFile:
"""Copy a specified file in Google Drive and return the created file."""
drive = create_diy_gdrive()
if dest_title is None:
dest_title = build_dest_title(drive, source_id)
return drive.copy_file(source_id, dest_title, parent_dir_id) | 981cfa18da78a160447778cab5f3326f35dbfc59 | 3,729 |
def label_tuning(
text_embeddings,
text_labels,
label_embeddings,
n_steps: int,
reg_coefficient: float,
learning_rate: float,
dropout: float,
) -> np.ndarray:
"""
With N as number of examples, K as number of classes, k as embedding dimension.
Args:
'text_embeddings': float[N,k] of embedded texts
'text_labels': float[N,K] class score for each example.
'label_embeddings': float[K,k] class embeddings
Returns:
float[K,k] updated class embeddings
"""
if text_embeddings.shape[0] == 0:
raise ValueError(text_embeddings.shape)
if label_embeddings.shape[0] == 0:
raise ValueError(label_embeddings.shape)
text_embeddings = tf.constant(text_embeddings)
text_labels = tf.constant(text_labels)
label_embeddings = tf.constant(label_embeddings)
init_label_embeddings = label_embeddings
for i in range(n_steps):
with tf.GradientTape() as tape:
tape.watch(label_embeddings)
dot_loss = _get_loss(
text_embeddings,
text_labels,
label_embeddings,
dropout=dropout,
)
drift_loss = tf.reduce_mean(
(label_embeddings - init_label_embeddings) ** 2
)
total_loss = dot_loss + reg_coefficient * drift_loss
gradient = tape.gradient(total_loss + drift_loss, label_embeddings)
label_embeddings = label_embeddings - (learning_rate * gradient)
label_embeddings = label_embeddings.numpy()
return label_embeddings | 83e4181c6600065bfb2cc98b4ca4957ea920ad7c | 3,730 |
import tensorflow as tf
def create_nan_filter(tensor):
    """Creates a layer which replaces NaNs with zeros."""
return tf.where(tf.is_nan(tensor), tf.zeros_like(tensor), tensor) | 4e03c4c4c275430e5228e2d73b09e24f8c787e71 | 3,731 |
def requestor_is_superuser(requestor):
"""Return True if requestor is superuser."""
return getattr(requestor, "is_superuser", False) | 7b201601cf8a1911aff8271ff71b6d4d51f68f1a | 3,732 |
from typing import Dict
def process(business: Business, # pylint: disable=too-many-branches
filing: Dict,
filing_rec: Filing,
filing_meta: FilingMeta): # pylint: disable=too-many-branches
"""Process the incoming historic conversion filing."""
# Extract the filing information for incorporation
if not (conversion_filing := filing.get('filing', {}).get('conversion')):
raise QueueException(f'CONVL legal_filing:conversion missing from {filing_rec.id}')
if business:
        raise QueueException(f'Business Already Exists: CONVL legal_filing:conversion {filing_rec.id}')
if not (corp_num := filing.get('filing', {}).get('business', {}).get('identifier')):
        raise QueueException(f'conversion {filing_rec.id} missing the business identifier.')
# Initial insert of the business record
business_info_obj = conversion_filing.get('nameRequest')
if not (business := business_info.update_business_info(corp_num, Business(), business_info_obj, filing_rec)):
raise QueueException(f'CONVL conversion {filing_rec.id}, Unable to create business.')
if offices := conversion_filing.get('offices'):
update_offices(business, offices)
if parties := conversion_filing.get('parties'):
update_parties(business, parties)
if share_structure := conversion_filing.get('shareStructure'):
shares.update_share_structure(business, share_structure)
if name_translations := conversion_filing.get('nameTranslations'):
aliases.update_aliases(business, name_translations)
return business, filing_rec | 78f5033251cb90023c2e0c0ad064b92af5212e65 | 3,733 |
def est_const_bsl(bsl,starttime=None,endtime=None,intercept=False,val_tw=None):
"""Performs a linear regression (assuming the intercept at the origin).
The corresponding formula is tt-S*1/v-c = 0 in which tt is the travel
time of the acoustic signal in seconds and 1/v is the reciprocal of the
harmonic mean of the sound speed. The slope S is equal to the constant
baseline length and by default c is assumed to be 0, but can optionally
also be determined (intercept=True).
It needs:
bsl ... pandas.Dataframe with ID of beacon 1 ('ID'), ID of beacon 2
('range_ID'), calculated baseline lengths in metres ('bsl'), one
way traveltime in seconds ('tt'), sound speed at beacon 1 ('ssp1')
in metres per second, sound speed at beacon 2 ('ssp2') in metres per
second, measured traveltime in milliseconds ('range'), turn around
time in milliseconds ('TAT')(eventually harmonic mean of 'ssp1' and
'ssp2' ('hmssp') and reciprocal of harmonic mean of 'ssp1' and
'ssp2' ('1/v'); if they do not exist, they will be calculated) with
corresponding times of measurement for beacon pair.
starttime (optional) ... string with starttime of time window for
estimation of constant baseline length (format: 'YYYY-mm-dd
HH:MM:SS', default: first entry in bsl)
endtime (optional) ... string with endtime of time window for estimation
of constant baseline length (format: 'YYYY-mm-dd HH:MM:SS', default:
last entry in bsl)
intercept (optional) ... specify whether intercept should be set to
0 [False] or should be calculated [True] (default is False)
val_tw (optional) ... specify time window for which estimated constant
baseline length and standard deviation (as well as intercept) will be
stored in returned pandas.Dataframe (format: ['YYYY-mm-dd HH:MM:SS',
'YYYY-mm-dd HH:MM:SS'], default is starttime and endtime)
It returns:
bsl ... pandas.Dataframe with ID of beacon 1 ('ID'), ID of beacon 2
('range_ID'), calculated baseline lengths in metres ('bsl'), one
way traveltime in seconds ('tt'), sound speed at beacon 1 ('ssp1')
in metres per second, sound speed at beacon 2 ('ssp2') in metres per
second, measured traveltime in milliseconds ('range'), turn around
time in milliseconds ('TAT'), harmonic mean of 'ssp1' and 'ssp2'
('hmssp'), reciprocal of harmonic mean of 'ssp1' and 'ssp2' ('1/v'),
constant baseline length ('bsl_const') in given time window and
standard deviation of the measurements compared to the fitted line
in seconds (sigma = sqrt(sum((tt-S*1/v)^2)/(len(1/v)-1)),
'std_dev_tt') in given time window (and intercept ('intercept') )
with corresponding times of measurement for beacon pair.
"""
# check if columns 'hmssp' and '1/v' (harmonic mean of sound speeds and its
# reciprocal already exist in bsl and if not then calculate them
if not set(['hmssp','1/v']).issubset(bsl.columns):
bsl = calc_hmssp_recp_v(bsl)
# end if not set(['hmssp','1/v']).issubset(bsl.columns):
# copy bsl to new pandas.Dataframe to cut it in time
bsl_new = bsl.copy()
# check if time window for estimation of constant baseline length is given
if starttime is not None:
bsl_new = bsl_new.loc[starttime:]
else:
# set startime to first index in bsl
starttime = bsl_new.index[0]
# end if starttime is not None:
if endtime is not None:
bsl_new = bsl_new.loc[:endtime]
else:
# set endtime to last index in bsl
endtime = bsl_new.index[-1]
# end if endtime is not None:
# the numpy function numpy.linalg.lstsq() needs x as (M,N) matrix
if not intercept:
x = bsl_new['1/v'][:,np.newaxis]
else:
x = np.array(([[bsl_new['1/v'][j], 1] for j in range(len(bsl_new))]))
# end if not intercept:
S,residuals,_,_ = np.linalg.lstsq(x,bsl_new['tt'])
sigma = np.sqrt(residuals/(len(x)-1))
# set column 'bsl_const' for values between starttime and endtime to S and
# column 'std_dev_tt' to estimated sigma in bsl
if val_tw is not None:
starttime = val_tw[0]
endtime = val_tw[1]
# end if val_tw is not None:
if not intercept:
bsl.loc[starttime:endtime,'bsl_const'] = S
else:
bsl.loc[starttime:endtime,'bsl_const'] = S[0]
bsl.loc[starttime:endtime,'intercept'] = S[1]
# end if not intercept:
bsl.loc[starttime:endtime,'std_dev_tt'] = sigma
return(bsl) | 906119dcc66f4ab536d4a89c9c9b633bb6835058 | 3,734 |
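# Added sketch of the core fit performed by est_const_bsl (made-up numbers): the travel
# time tt is regressed on the reciprocal sound speed 1/v through the origin, so the
# slope S is the constant baseline length in metres.
import numpy as np

recp_v = 1.0 / np.array([1500.0, 1498.0, 1502.0, 1499.0])   # s/m, reciprocal sound speeds
bsl_true = 1000.0                                            # m, assumed baseline length
tt = bsl_true * recp_v                                       # s, noise-free travel times
S, residuals, _, _ = np.linalg.lstsq(recp_v[:, np.newaxis], tt, rcond=None)
print(S[0])   # approximately 1000.0, the recovered constant baseline length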
def SeasonUPdate(temp):
""" Update appliance characteristics given the change in season
Parameters
----------
temp (obj): appliance set object for an individual season
Returns
----------
app_expected_load (float): expected load power in Watts
app_expected_dur (float): expected duration in hours
    appliance_set (list of appliance objects): appliance list for a given season
t_delta_exp_dur (pandas datetime): expected appliance duration
    app_index (array): index for each appliance
"""
app_expected_load = temp.app_expected_load
app_expected_dur = temp.app_expected_dur
appliance_set = temp.appliance_set
t_delta_exp_dur = temp.t_delta_exp_dur
app_index = np.arange(0,len(temp.appliance_set))
return app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index | 7fdfa932bedf2ac17490df6aaeedb547e1774c4d | 3,735 |
def pad_and_reshape(instr_spec, frame_length, F):
"""
:param instr_spec:
:param frame_length:
:param F:
:returns:
"""
spec_shape = tf.shape(instr_spec)
extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
n_extra_row = (frame_length) // 2 + 1 - F
extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
extended_spec = tf.concat([instr_spec, extension], axis=2)
old_shape = tf.shape(extended_spec)
new_shape = tf.concat([
[old_shape[0] * old_shape[1]],
old_shape[2:]],
axis=0)
processed_instr_spec = tf.reshape(extended_spec, new_shape)
return processed_instr_spec | 097bc2e8f58f1e947b8f69a6163d1c64d2197f9e | 3,736 |
def GetExclusiveStorageForNodes(cfg, node_uuids):
"""Return the exclusive storage flag for all the given nodes.
@type cfg: L{config.ConfigWriter}
@param cfg: cluster configuration
@type node_uuids: list or tuple
@param node_uuids: node UUIDs for which to read the flag
@rtype: dict
@return: mapping from node uuids to exclusive storage flags
@raise errors.OpPrereqError: if any given node name has no corresponding
node
"""
getflag = lambda n: _GetExclusiveStorageFlag(cfg, n)
flags = map(getflag, node_uuids)
return dict(zip(node_uuids, flags)) | b93625bc2b865530bef0c648885f5615905e54c1 | 3,737 |
import csv
import numpy as np
def get_read_data(file, dic, keys):
""" Assigns reads to labels"""
r = csv.reader(open(file))
lines = list(r)
vecs_forwards = []
labels_forwards = []
vecs_reverse = []
labels_reverse = []
for key in keys:
for i in dic[key]:
for j in lines:
if i in j[0]:
if '_2.fq' in j[0] or '_R2_' in j[0]:
vecs_reverse.append(j[2:])
labels_reverse.append(key)
else:
vecs_forwards.append(j[2:])
labels_forwards.append(key)
return np.array(vecs_forwards), np.array(labels_forwards), np.array(vecs_reverse), np.array(labels_reverse) | 355c44cbf83ab9506755bda294723bfd1e8a15c1 | 3,738 |
def removeDuplicates(listToRemoveFrom: list[str]):
"""Given list, returns list without duplicates"""
listToReturn: list[str] = []
for item in listToRemoveFrom:
if item not in listToReturn:
listToReturn.append(item)
return listToReturn | 8265e7c560d552bd9e30c0a1140d6668abd1b4d6 | 3,739 |
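# Added alternative (for illustration): dict.fromkeys preserves insertion order in
# Python 3.7+, so it removes duplicates while keeping the original order.
def remove_duplicates_compact(items: list[str]) -> list[str]:
    return list(dict.fromkeys(items))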
def check_hms_angle(value):
"""
Validating function for angle sexagesimal representation in hours.
Used in the rich_validator
"""
if isinstance(value, list):
raise validate.ValidateError("expected value angle, found list")
match = hms_angle_re.match(value)
if not match:
raise VdtAngleError("not a valid hour angle: %s" % value)
return hms_to_angle(match.groups()) | bf1b6ec14cc131263913c331cb1d3cb9a06cdc76 | 3,740 |
def stats():
"""Retrives the count of each object type.
Returns:
JSON object with the number of objects by type."""
return jsonify({
"amenities": storage.count("Amenity"),
"cities": storage.count("City"),
"places": storage.count("Place"),
"reviews": storage.count("Review"),
"states": storage.count("State"),
"users": storage.count("User")
}) | 31ebd630381fe33cdbff507a3d34497423dfd621 | 3,742 |
def addflux2pix(px,py,pixels,fmod):
"""Usage: pixels=addflux2pix(px,py,pixels,fmod)
    Drizzle flux onto pixels using a square PSF of unit pixel size
px,py are the pixel position (integers)
fmod is the flux calculated for (px,py) pixel
and it has the same length as px and py
pixels is the image.
"""
xmax = pixels.shape[0] #Size of pixel array
ymax = pixels.shape[1]
pxmh = px-0.5 #location of reference corner of PSF square
pymh = py-0.5
dx = np.floor(px+0.5)-pxmh
dy = np.floor(py+0.5)-pymh
# Supposing right-left as x axis and up-down as y axis:
# Lower left pixel
npx = int(pxmh) #Numpy arrays start at zero
npy = int(pymh)
#print('n',npx,npy)
if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) :
pixels[npx,npy]=pixels[npx,npy]+fmod*dx*dy
#Same operations are done for the 3 pixels other neighbouring pixels
# Lower right pixel
npx = int(pxmh)+1 #Numpy arrays start at zero
npy = int(pymh)
if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) :
pixels[npx,npy]=pixels[npx,npy]+fmod*(1.0-dx)*dy
# Upper left pixel
npx = int(pxmh) #Numpy arrays start at zero
npy = int(pymh)+1
if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) :
pixels[npx,npy]=pixels[npx,npy]+fmod*dx*(1.0-dy)
# Upper right pixel
npx = int(pxmh)+1 #Numpy arrays start at zero
npy = int(pymh)+1
if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) :
pixels[npx,npy]=pixels[npx,npy]+fmod*(1.0-dx)*(1.0-dy)
    return pixels | 808f99dac20cda962146fee8f2b9878a07804f9b | 3,743 |
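# Added check of the drizzle weighting in addflux2pix (illustrative): a source of flux
# 1.0 whose PSF square straddles a pixel boundary equally spreads over four neighbouring
# pixels with weights of 0.25 each, so the total flux is conserved.
import numpy as np

pixels = np.zeros((5, 5))
pixels = addflux2pix(2.0, 2.0, pixels, 1.0)
print(pixels.sum())        # 1.0: flux is conserved
print(pixels[1:3, 1:3])    # [[0.25 0.25] [0.25 0.25]]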
def get_dea_landsat_vrt_dict(feat_list):
"""
    this func is designed to take all relevant landsat bands
    on the dea public database for each scene in the stac query.
    it results in a list of vrts for each band separately and maps
    them to a dict where the band name is the key and the vrt list is the value.
"""
# notify
    print('Getting landsat vrts for each relevant band.')
# check features type, length
if not isinstance(feat_list, list):
raise TypeError('Features must be a list of xml objects.')
elif not len(feat_list) > 0:
raise ValueError('No features provided.')
# required dea landsat ard band names
bands = [
'nbart_blue',
'nbart_green',
'nbart_red',
'nbart_nir',
'nbart_swir_1',
'nbart_swir_2',
'oa_fmask'
]
# iter each band name and build associated vrt list
band_vrts_dict = {}
for band in bands:
print('Building landsat vrt list for band: {}.'.format(band))
band_vrts_dict[band] = make_vrt_list(feat_list, band=band)
# notify and return
print('Got {} landsat vrt band lists successfully.'.format(len(band_vrts_dict)))
return band_vrts_dict | 79009cc9fbcd085c8e95cf15f4271419d598d1ce | 3,744 |
def is_zh_l_bracket(uni_ch):
"""判断一个 unicode 是否是中文左括号。"""
if uni_ch == u'\uff08':
return True
else:
return False | 3ba18418005824a51de380c898726d050d464ec2 | 3,746 |
def petlink32_to_dynamic_projection_mMR(filename,n_packets,n_radial_bins,n_angles,n_sinograms,time_bins,n_axial,n_azimuthal,angles_axial,angles_azimuthal,size_u,size_v,n_u,n_v,span,n_segments,segments_sizes,michelogram_segments,michelogram_planes, status_callback):
"""Make dynamic compressed projection from list-mode data. """
descriptor = [ {'name':'filename', 'type':'string', 'value':filename ,'size':len(filename)},
{'name':'n_packets', 'type':'long', 'value':n_packets },
{'name':'n_radial_bins', 'type':'uint', 'value':n_radial_bins },
{'name':'n_angles', 'type':'uint', 'value':n_angles },
{'name':'n_sinograms', 'type':'uint', 'value':n_sinograms },
{'name':'n_time_bins', 'type':'uint', 'value':len(time_bins)-1 },
{'name':'time_bins', 'type':'array', 'value':np.int32(time_bins) },
{'name':'n_axial', 'type':'uint', 'value':n_axial },
{'name':'n_azimuthal', 'type':'uint', 'value':n_azimuthal },
{'name':'angles_axial', 'type':'array', 'value':angles_axial },
{'name':'angles_azimuthal', 'type':'array', 'value':angles_azimuthal },
{'name':'size_u', 'type':'float', 'value':size_u },
{'name':'size_v', 'type':'float', 'value':size_v },
{'name':'n_u', 'type':'uint', 'value':n_u },
{'name':'n_v', 'type':'uint', 'value':n_v },
{'name':'span', 'type':'uint', 'value':span },
{'name':'n_segments', 'type':'uint', 'value':n_segments },
{'name':'segments_sizes', 'type':'array', 'value':np.int32(segments_sizes) },
{'name':'michelogram_segments', 'type':'array', 'value':np.int32(michelogram_segments) },
{'name':'michelogram_planes', 'type':'array', 'value':np.int32(michelogram_planes) },
{'name':'status_callback', 'type':'function','value':status_callback, 'arg_types':['uint'] }, ]
r = call_c_function( mMR_c.petlink32_to_dynamic_projection_mMR_michelogram, descriptor )
if not r.status == petlink.status_success():
raise ErrorInCFunction("The execution of 'petlink32_to_dynamic_projection_mMR_michelogram' was unsuccessful.",r.status,'mMR_c.petlink32_to_dynamic_projection_mMR')
return r.dictionary | 9764da2a2fb0c021274133fdd46661a44cf0dc31 | 3,747 |
from typing import Dict
def is_core_recipe(component: Dict) -> bool:
"""
Returns True if a recipe component contains a "Core Recipe"
preparation.
"""
preparations = component.get('recipeItem', {}).get('preparations') or []
return any(prep.get('id') == PreparationEnum.CORE_RECIPE.value for prep in preparations) | 451798c6f31297a80ac43db00243fb2dd85ced46 | 3,748 |
def build_estimator(output_dir, first_layer_size, num_layers, dropout,
learning_rate, save_checkpoints_steps):
"""Builds and returns a DNN Estimator, defined by input parameters.
Args:
output_dir: string, directory to save Estimator.
first_layer_size: int, size of first hidden layer of DNN.
num_layers: int, number of hidden layers.
dropout: float, dropout rate used in training.
learning_rate: float, learning_rate used in training.
save_checkpoints_steps: int, training steps to save Estimator.
Returns:
`Estimator` instance.
"""
# Sets head to default head for DNNClassifier with two classes.
model_params = {
'head':
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(),
'feature_columns': [
tf.feature_column.numeric_column(c, shape=[])
for c in constants.FEATURE_COLUMNS
],
'hidden_units': [
max(int(first_layer_size / (pow(2, i))), 2)
for i in range(int(num_layers))
],
'dropout':
dropout,
'optimizer':
tf.train.AdagradOptimizer(learning_rate)
}
def _model_fn(features, labels, mode, params):
"""Build TF graph based on canned DNN classifier."""
key_column = features.pop(constants.KEY_COLUMN, None)
if key_column is None:
raise ValueError('Key is missing from features.')
spec = _dnn_model_fn(features=features, labels=labels, mode=mode, **params)
predictions = spec.predictions
if predictions:
predictions[constants.KEY_COLUMN] = tf.convert_to_tensor_or_sparse_tensor(
key_column)
spec = spec._replace(predictions=predictions)
spec = spec._replace(export_outputs={
'classes': tf.estimator.export.PredictOutput(predictions)
})
return spec
config = tf.estimator.RunConfig(save_checkpoints_steps=save_checkpoints_steps)
return tf.estimator.Estimator(
model_fn=_model_fn,
model_dir=output_dir,
config=config,
params=model_params) | 339e26fd910aa7412b8e2b66845718e440ccada6 | 3,749 |
import json
def importConfig():
"""設定ファイルの読み込み
Returns:
tuple:
str: interface,
str: alexa_remote_control.sh path
list: device list
"""
with open("config.json", "r", encoding="utf-8") as f:
config = json.load(f)
interface = config["interface"]
if not interface:
return False
arc_path = config["arc_path"]
devices = config["device_list"]
return (interface, arc_path, devices) | 84f8fc0deec4aebfe48209b01d1a35f7373d31e6 | 3,750 |
from typing import List
from typing import Dict
from typing import Any
def create_local_command(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str:
"""Create a terminal command to run the jobs locally."""
cmd = ""
for meta, job in zip(jobs_metadata, jobs):
input_file = meta.input.absolute().as_posix()
workdir = meta.workdir.absolute().as_posix()
# Run locally
cmd += f'cd {workdir} && {opts.command} {input_file} & '
return cmd | f5d23c1fb2271b44a323d1d17e9dda35df29fcd7 | 3,752 |
import time
def time_for_log() -> str:
"""Function that print the current time for bot prints"""
return time.strftime("%d/%m %H:%M:%S - ") | 0f964d5c827782ff8cc433e57bb3e78d0a7c7cba | 3,753 |
import math
def _is_int(n) -> bool:
"""
is_int 是判断给定数字 n 是否为整数,
在判断中 n 小于epsilon的小数部分将被忽略,
是则返回 True,否则 False
:param n: 待判断的数字
:return: True if n is A_ub integer, False else
"""
return (n - math.floor(n) < _epsilon) or (math.ceil(n) - n < _epsilon) | 076a82d245333890d6790f65a58e5507905ca68f | 3,754 |
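# Added illustration of the tolerance used by _is_int; _epsilon is a module-level
# constant not shown in the snippet, so an assumed value is set here just for the demo.
_epsilon = 1e-6   # assumed value, for illustration only
print(_is_int(3.0))            # True
print(_is_int(3.0000000001))   # True, fractional part below _epsilon
print(_is_int(3.1))            # False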
def _cpp_het_stat(A, t_stop, rates, t_start=0. * pq.ms):
"""
Generate a Compound Poisson Process (CPP) with amplitude distribution
A and heterogeneous firing rates r=r[0], r[1], ..., r[-1].
Parameters
----------
A : np.ndarray
CPP's amplitude distribution. A[j] represents the probability of
a synchronous event of size j among the generated spike trains.
The sum over all entries of A must be equal to one.
t_stop : pq.Quantity
The end time of the output spike trains
rates : pq.Quantity
Array of firing rates of each spike train generated with
t_start : pq.Quantity, optional
The start time of the output spike trains
Default: 0 pq.ms
Returns
-------
list of neo.SpikeTrain
List of neo.SpikeTrains with different firing rates, forming
a CPP with amplitude distribution `A`.
"""
# Computation of Parameters of the two CPPs that will be merged
# (uncorrelated with heterog. rates + correlated with homog. rates)
n_spiketrains = len(rates) # number of output spike trains
# amplitude expectation
expected_amplitude = np.dot(A, np.arange(n_spiketrains + 1))
r_sum = np.sum(rates) # sum of all output firing rates
r_min = np.min(rates) # minimum of the firing rates
# rate of the uncorrelated CPP
r_uncorrelated = r_sum - n_spiketrains * r_min
# rate of the correlated CPP
r_correlated = r_sum / expected_amplitude - r_uncorrelated
# rate of the hidden mother process
r_mother = r_uncorrelated + r_correlated
# Check the analytical constraint for the amplitude distribution
if A[1] < (r_uncorrelated / r_mother).rescale(
pq.dimensionless).magnitude:
raise ValueError('A[1] too small / A[i], i>1 too high')
# Compute the amplitude distribution of the correlated CPP, and generate it
A = A * (r_mother / r_correlated).magnitude
A[1] = A[1] - r_uncorrelated / r_correlated
compound_poisson_spiketrains = _cpp_hom_stat(
A, t_stop, r_min, t_start)
# Generate the independent heterogeneous Poisson processes
poisson_spiketrains = \
[homogeneous_poisson_process(rate - r_min, t_start, t_stop)
for rate in rates]
# Pool the correlated CPP and the corresponding Poisson processes
return [_pool_two_spiketrains(compound_poisson_spiketrain,
poisson_spiketrain)
for compound_poisson_spiketrain, poisson_spiketrain
in zip(compound_poisson_spiketrains, poisson_spiketrains)] | adc00577e9a6cb1ff7f9e0313befe98c81332ab1 | 3,755 |
def return_bad_parameter_config() -> CloudSettings:
"""Return a wrongly configured cloud config class."""
CloudSettingsTest = CloudSettings( # noqa: N806
settings_order=[
"init_settings",
"aws_parameter_setting",
"file_secret_settings",
"env_settings",
]
) # noqa: N806
class AWSSettings(CloudSettingsTest): # type: ignore
test: str = "Cool"
prefix_test_store: str = ""
return AWSSettings() | 06f8af87873d571be9c5ae7fd2e563402e57b2d0 | 3,756 |
def update(isamAppliance, instance_id, id, filename=None, contents=None, check_mode=False, force=False):
"""
Update a file in the administration pages root
:param isamAppliance:
:param instance_id:
:param id:
    :param filename:
:param contents:
:param check_mode:
:param force:
:return:
"""
if force is True or _check_file(isamAppliance, instance_id, id) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
if filename is not None:
return isamAppliance.invoke_put_files(
"Update a file in the administration page root",
"/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
[
{
'file_formfield': 'file',
'filename': filename,
'mimetype': 'application/octet-stream'
}
],
{
'file': filename,
'type': 'file'
})
elif contents is not None:
return isamAppliance.invoke_put_files(
"Update a file in the administration page root",
"/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
{
'contents': contents,
'type': 'file'
})
else:
return isamAppliance.create_return_object(
warnings=["Either contents or filename parameter need to be provided. Skipping update request."]) | af0b95096638fb34af130623b0929c4394a1a845 | 3,757 |
def view_deflate_encoded_content():
"""Returns Deflate-encoded data.
---
tags:
- Response formats
produces:
- application/json
responses:
200:
        description: Deflate-encoded data.
"""
return jsonify(get_dict("origin", "headers", method=request.method, deflated=True)) | ff8d39f75a6cb526b3a61e85234e71efa174a208 | 3,758 |
def predict_from_word_vectors_matrix(tokens, matrix, nlp, POS="NOUN", top_number=constants.DEFAULT_TOP_ASSOCIATIONS):
"""
Make a prediction based on the word vectors
:param tokens:
:param matrix:
:param nlp:
:param POS:
:param top_number:
:return:
"""
vector_results = collect_word_vector_associations(tokens, matrix)
top_results = get_top_results(vector_results, nlp, top_number, POS)
return top_results | 6a491e481238af932994bb8d383baca4da1ebd55 | 3,759 |
import cv2
def blendImg(img_a, img_b, α=0.8, β=1., γ=0.):
"""
The result image is computed as follows:
img_a * α + img_b * β + γ
"""
return cv2.addWeighted(img_a, α, img_b, β, γ) | f60918ba424b0d59e9025c088c0f2c9a3f739fde | 3,762 |
def genoimc_dup4_loc():
"""Create genoimc dup4 sequence location"""
return {
"_id": "ga4gh:VSL.us51izImAQQWr-Hu6Q7HQm-vYvmb-jJo",
"sequence_id": "ga4gh:SQ.-A1QmD_MatoqxvgVxBLZTONHz9-c7nQo",
"interval": {
"type": "SequenceInterval",
"start": {
"value": 30417575,
"comparator": "<=",
"type": "IndefiniteRange"
},
"end": {
"value": 31394018,
"comparator": ">=",
"type": "IndefiniteRange"
}
},
"type": "SequenceLocation"
} | 3ea1b39fed22487bebffc78d45cb493b7d7afa4a | 3,764 |
def compare_versions(a, b):
"""Return 0 if a == b, 1 if a > b, else -1."""
a, b = version_to_ints(a), version_to_ints(b)
for i in range(min(len(a), len(b))):
if a[i] > b[i]:
return 1
elif a[i] < b[i]:
return -1
return 0 | 0b22589164f7d3731edc34af97d306186e677371 | 3,765 |
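# Added usage sketch for compare_versions; version_to_ints is not shown above, so a
# minimal assumed implementation is included here to make the demo self-contained.
def version_to_ints(version: str):
    return tuple(int(part) for part in version.split("."))

print(compare_versions("1.2.0", "1.10.0"))   # -1, because 2 < 10 numerically
print(compare_versions("2.0", "2.0"))        #  0
print(compare_versions("3.1", "2.9"))        #  1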
def get_machine_action_data(machine_action_response):
"""Get machine raw response and returns the machine action info in context and human readable format.
Notes:
Machine action is a collection of actions you can apply on the machine, for more info
https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction
Returns:
dict. Machine action's info
"""
action_data = \
{
"ID": machine_action_response.get('id'),
"Type": machine_action_response.get('type'),
"Scope": machine_action_response.get('scope'),
"Requestor": machine_action_response.get('requestor'),
"RequestorComment": machine_action_response.get('requestorComment'),
"Status": machine_action_response.get('status'),
"MachineID": machine_action_response.get('machineId'),
"ComputerDNSName": machine_action_response.get('computerDnsName'),
"CreationDateTimeUtc": machine_action_response.get('creationDateTimeUtc'),
"LastUpdateTimeUtc": machine_action_response.get('lastUpdateTimeUtc'),
"RelatedFileInfo": {
"FileIdentifier": machine_action_response.get('fileIdentifier'),
"FileIdentifierType": machine_action_response.get('fileIdentifierType')
},
"Commands": machine_action_response.get('commands')
}
return action_data | 1e0ffc37d8d3b5662b64ec28cb850c6277b1bad2 | 3,766 |
import torch
from torch import nn
def convolutionalize(modules, input_size):
"""
Recast `modules` into fully convolutional form.
The conversion transfers weights and infers kernel sizes from the
`input_size` and modules' action on it.
n.b. This only handles the conversion of linear/fully-connected modules,
although other module types could require conversion for correctness.
"""
fully_conv_modules = []
x = torch.zeros((1, ) + input_size)
for m in modules:
if isinstance(m, nn.Linear):
n = nn.Conv2d(x.size(1), m.weight.size(0), kernel_size=(x.size(2), x.size(3)))
n.weight.data.view(-1).copy_(m.weight.data.view(-1))
n.bias.data.view(-1).copy_(m.bias.data.view(-1))
m = n
fully_conv_modules.append(m)
x = m(x)
return fully_conv_modules | 5693a17bac0f39538bfcada3280ce06ef91230a3 | 3,768 |
def is_unique2(s):
    """
    Use a list indexed by the integer value (ordinal) of each character to tell
    if that character has already appeared once
    """
    d = [False] * 128  # one flag per ASCII character
    for t in s:
        if d[ord(t)]:
            return False
        d[ord(t)] = True
    return True | b1a1bdea8108690a0e227fd0b75f910bd6b99a07 | 3,769 |
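# Added equivalent sketch using a set instead of an ord-indexed list (for illustration):
def is_unique_set(s):
    """Return True if no character appears more than once in s."""
    seen = set()
    for ch in s:
        if ch in seen:
            return False
        seen.add(ch)
    return True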
import random
def uncomplete_tree_parallel(x:ATree, mode="full"):
""" Input is tuple (nl, fl, split)
Output is a randomly uncompleted tree,
every node annotated whether it's terminated and what actions are good at that node
"""
fl = x
fl.parent = None
add_descendants_ancestors(fl)
y = ATree("@START@", [])
y.align = fl
y.is_open = True
i = 0
y = assign_gold_actions(y, mode=mode)
choices = [deepcopy(y)] # !! can't cache because different choices !
while not all_terminated(y):
y = mark_for_execution(y, mode=mode)
y = execute_chosen_actions(y, mode=mode)
y = assign_gold_actions(y, mode=mode)
y = adjust_gold(y, mode=mode)
choices.append(deepcopy(y))
i += 1
ret = random.choice(choices[:-1])
return ret | f59e0f0279c9c439034116f769f51d60a924c4af | 3,770 |
def stations_by_river(stations):
"""Give a dictionary to hold the rivers name as keys and their corresponding stations' name as values"""
rivers_name = []
for i in stations:
if i.river not in rivers_name:
rivers_name.append(i.river)
elif i.river in rivers_name:
continue
big_list = []
for n in rivers_name:
lists = []
for y in stations:
if n == y.river:
lists.append(y.name)
elif n != y.river:
continue
lists = sorted(lists)
big_list.append(lists)
dictionary = dict(zip(rivers_name, big_list))
dicti = {}
for key in sorted(dictionary):
dicti.update({key : dictionary[key]})
assert dicti != {}
return dicti | 66fd928415619d175b7069b8c3103a3f7d930aac | 3,771 |
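# Added compact sketch of the same grouping using collections.defaultdict; it assumes
# each station exposes .river and .name attributes, as in the function above.
from collections import defaultdict

def stations_by_river_compact(stations):
    rivers = defaultdict(list)
    for station in stations:
        rivers[station.river].append(station.name)
    return {river: sorted(names) for river, names in sorted(rivers.items())}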
def QA_SU_save_huobi(frequency):
"""
    Save Huobi kline data, dispatching to the day or minute saver depending on frequency
"""
if (frequency not in ["1d", "1day", "day"]):
return QA_SU_save_huobi_min(frequency)
else:
return QA_SU_save_huobi_day(frequency) | cdea45afe6d7e0b61dea517adb8fc484e8eafa38 | 3,772 |
def inverse(a):
"""
[description]
calculating the inverse of the number of characters,
we do this to be able to find our departure when we arrive.
this part will be used to decrypt the message received.
:param a: it is an Int
:return: x -> it is an Int
"""
x = 0
while a * x % 97 != 1:
x = x + 1
return x | 2893d2abda34e4573eb5d9602edc0f8e14246e09 | 3,774 |
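# Added check of inverse() against Python's built-in modular inverse (pow with a
# negative exponent, available since Python 3.8); both give the inverse modulo 97.
print(inverse(5), pow(5, -1, 97))   # 39 39
print(5 * inverse(5) % 97)          # 1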
from typing import Optional
from typing import Union
def currency_column_to_numeric(
df: pd.DataFrame,
column_name: str,
cleaning_style: Optional[str] = None,
cast_non_numeric: Optional[dict] = None,
fill_all_non_numeric: Optional[Union[float, int]] = None,
remove_non_numeric: bool = False,
) -> pd.DataFrame:
"""Convert currency column to numeric.
This method does not mutate the original DataFrame.
This method allows one to take a column containing currency values,
inadvertently imported as a string, and cast it as a float. This is
usually the case when reading CSV files that were modified in Excel.
Empty strings (i.e. `''`) are retained as `NaN` values.
Example:
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({
... "a_col": [" 24.56", "-", "(12.12)", "1,000,000"],
... "d_col": ["", "foo", "1.23 dollars", "-1,000 yen"],
... })
>>> df # doctest: +NORMALIZE_WHITESPACE
a_col d_col
0 24.56
1 - foo
2 (12.12) 1.23 dollars
3 1,000,000 -1,000 yen
The default cleaning style.
>>> df.currency_column_to_numeric("d_col")
a_col d_col
0 24.56 NaN
1 - NaN
2 (12.12) 1.23
3 1,000,000 -1000.00
The accounting cleaning style.
>>> df.currency_column_to_numeric("a_col", cleaning_style="accounting") # doctest: +NORMALIZE_WHITESPACE
a_col d_col
0 24.56
1 0.00 foo
2 -12.12 1.23 dollars
3 1000000.00 -1,000 yen
Valid cleaning styles are:
- `None`: Default cleaning is applied. Empty strings are always retained as
`NaN`. Numbers, `-`, `.` are extracted and the resulting string
is cast to a float.
- `'accounting'`: Replaces numbers in parentheses with negatives, removes commas.
:param df: The pandas DataFrame.
:param column_name: The column containing currency values to modify.
:param cleaning_style: What style of cleaning to perform.
:param cast_non_numeric: A dict of how to coerce certain strings to numeric
type. For example, if there are values of 'REORDER' in the DataFrame,
`{'REORDER': 0}` will cast all instances of 'REORDER' to 0.
Only takes effect in the default cleaning style.
:param fill_all_non_numeric: Similar to `cast_non_numeric`, but fills all
strings to the same value. For example, `fill_all_non_numeric=1`, will
make everything that doesn't coerce to a currency `1`.
Only takes effect in the default cleaning style.
:param remove_non_numeric: If set to True, rows of `df` that contain
non-numeric values in the `column_name` column will be removed.
Only takes effect in the default cleaning style.
:raises ValueError: If `cleaning_style` is not one of the accepted styles.
:returns: A pandas DataFrame.
""" # noqa: E501
check("column_name", column_name, [str])
check_column(df, column_name)
column_series = df[column_name]
if cleaning_style == "accounting":
df.loc[:, column_name] = df[column_name].apply(
_clean_accounting_column
)
return df
if cleaning_style is not None:
raise ValueError(
"`cleaning_style` is expected to be one of ('accounting', None). "
f"Got {cleaning_style!r} instead."
)
if cast_non_numeric:
check("cast_non_numeric", cast_non_numeric, [dict])
        _make_cc_partial = partial(
            _currency_column_to_numeric,
            cast_non_numeric=cast_non_numeric,
        )
        column_series = column_series.apply(_make_cc_partial)
if remove_non_numeric:
df = df.loc[column_series != "", :]
# _replace_empty_string_with_none is applied here after the check on
# remove_non_numeric since "" is our indicator that a string was coerced
# in the original column
column_series = _replace_empty_string_with_none(column_series)
if fill_all_non_numeric is not None:
check("fill_all_non_numeric", fill_all_non_numeric, [int, float])
column_series = column_series.fillna(fill_all_non_numeric)
column_series = _replace_original_empty_string_with_none(column_series)
df = df.assign(**{column_name: pd.to_numeric(column_series)})
return df | e382752e5aff389872da69f42a3ec62785df336f | 3,775 |
async def subreddit_type_submissions(sub="wallstreetbets", kind="new"):
"""
"""
comments = []
articles = []
red = await reddit_instance()
subreddit = await red.subreddit(sub)
if kind == "hot":
submissions = subreddit.hot()
elif kind == "top":
submissions = subreddit.top()
elif kind == "new":
submissions = subreddit.new()
elif kind == "random_rising":
submissions = subreddit.random_rising()
else:
submissions = subreddit.random()
async for submission in submissions:
article = clean_submission(submission)
article['subreddit'] = sub
articles.append(article)
top_level_comments = await submission.comments()
print(f"📗 Looking at submission: {article['title'][:40]}...")
for top_level_comment in top_level_comments:
if isinstance(top_level_comment, MoreComments):
continue
comment = clean_comment(top_level_comment)
print(f"🗯️ ... {comment['author']} said {comment['body'][:40]}")
comment['article_id'] = article['id']
comments.append(comment)
return (articles, comments) | 9cc8655575ca8fd3729e220b0ee3fc8e45e4ed56 | 3,776 |
import typing
import json
def _get_bundle_manifest(
uuid: str,
replica: Replica,
version: typing.Optional[str],
*,
bucket: typing.Optional[str] = None) -> typing.Optional[dict]:
"""
Return the contents of the bundle manifest file from cloud storage, subject to the rules of tombstoning. If version
is None, return the latest version, once again, subject to the rules of tombstoning.
If the bundle cannot be found, return None
"""
uuid = uuid.lower()
handle = Config.get_blobstore_handle(replica)
default_bucket = replica.bucket
# need the ability to use fixture bucket for testing
bucket = default_bucket if bucket is None else bucket
def tombstone_exists(uuid: str, version: typing.Optional[str]):
return test_object_exists(handle, bucket, BundleTombstoneID(uuid=uuid, version=version).to_key())
# handle the following deletion cases
# 1. the whole bundle is deleted
# 2. the specific version of the bundle is deleted
if tombstone_exists(uuid, None) or (version and tombstone_exists(uuid, version)):
return None
# handle the following deletion case
# 3. no version is specified, we want the latest _non-deleted_ version
if version is None:
# list the files and find the one that is the most recent.
prefix = f"bundles/{uuid}."
object_names = handle.list(bucket, prefix)
version = _latest_version_from_object_names(object_names)
if version is None:
# no matches!
return None
bundle_fqid = BundleFQID(uuid=uuid, version=version)
# retrieve the bundle metadata.
try:
bundle_manifest_blob = handle.get(bucket, bundle_fqid.to_key()).decode("utf-8")
return json.loads(bundle_manifest_blob)
except BlobNotFoundError:
return None | 7881e1514a9a645c1f7ee6479baa6e74bae4dabb | 3,778 |
def handler400(request, exception):
"""
This is a Django handler function for 400 Bad Request error
:param request: The Django Request object
:param exception: The exception caught
:return: The 400 error page
"""
context = get_base_context(request)
context.update({
'message': {
'title': '400 Bad Request',
'description': 'Your client has issued a malformed or illegal request.'
}
})
return render(request, 'velarium/base.html', context=context, status=400) | 0dc1b81ec86d675f348728863dfe07efbd936e8e | 3,779 |
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):
"""Gather top beams from nested structure."""
_, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)
return _gather_beams(nested, topk_indexes, batch_size, beam_size) | ebdaf391104a3f271a42549708f3e7adfaf8b0b0 | 3,781 |
import scipy
import numpy
def _traceinv_exact(K, B, C, matrix, gram, exponent):
"""
Finds traceinv directly for the purpose of comparison.
"""
# Exact solution of traceinv for band matrix
if B is not None:
if scipy.sparse.isspmatrix(K):
K_ = K.toarray()
B_ = B.toarray()
if C is not None:
C_ = C.toarray()
else:
K_ = K
B_ = B
if C is not None:
C_ = C
if exponent == 0:
if C is not None:
traceinv_exact = numpy.trace(C_ @ B_)
else:
traceinv_exact = numpy.trace(B_)
else:
if gram:
K_ = numpy.matmul(K_.T, K_)
if exponent > 1:
K1 = K_.copy()
for i in range(1, exponent):
K_ = numpy.matmul(K_, K1)
Kinv = numpy.linalg.inv(K_)
Op = numpy.matmul(Kinv, B_)
if C is not None:
Op = Kinv @ C_ @ Op
traceinv_exact = numpy.trace(Op)
elif exponent == 1 and not gram:
# B is identity. Using analytic formula.
traceinv_exact = band_matrix_traceinv(matrix['a'], matrix['b'],
matrix['size'], True)
else:
# B and C are identity. Compute traceinv directly.
if scipy.sparse.isspmatrix(K):
K_ = K.toarray()
else:
K_ = K
if exponent == 0:
traceinv_exact = K_.shape[0]
else:
if gram:
K_ = numpy.matmul(K_.T, K_)
K_temp = K_.copy()
for i in range(1, exponent):
K_ = numpy.matmul(K_, K_temp)
Kinv = numpy.linalg.inv(K_)
traceinv_exact = numpy.trace(Kinv)
return traceinv_exact | 3637a5aa726ef1bf8489783c435c429b59422240 | 3,782 |
def create_feature_vector_of_mean_mfcc_for_song(song_file_path: str) -> ndarray:
"""
Takes in a file path to a song segment and returns a numpy array containing the mean mfcc values
:param song_file_path: str
:return: ndarray
"""
song_segment, sample_rate = librosa.load(song_file_path)
mfccs = librosa.feature.mfcc(y=song_segment, sr=sample_rate, n_mfcc=NUMBER_OF_MFCC)
mfccs_processed = np.mean(mfccs.T, axis=0)
df = pd.DataFrame(mfccs_processed)
z_score_normalized_mfccs = (df.values - df.values.mean()) / df.values.std()
z_score_normalized_mfccs = np.array([i[0] for i in z_score_normalized_mfccs])
return z_score_normalized_mfccs | 8992feafd483bfe7b4af5e715ba1455884e1b710 | 3,783 |
def stations_highest_rel_level(stations, N):
"""Returns a list containing the names of the N stations
with the highest water level relative to the typical range"""
names = [] # create list for names
levels = [] # create list for levels
for i in range(len(stations)): # iterate through stations
if stations[i].relative_water_level() is not None:
# ^checks for valid relative water level
names.append(stations[i].name)
levels.append(stations[i].relative_water_level())
# ^adds names and levels to respective lists
combined = list(zip(names, levels)) # combines names and levels
    combined.sort(key=lambda x: x[1], reverse=True)  # sorts in descending order
output = [] # create output list
for i in range(N): # iterate up to N
output.append(combined[i][0]) # add station name to output
return output | 780a03a424c9b2f0dedee2e93eb9bd27cc1fce36 | 3,784 |
def add_global_nodes_edges(g_nx : nx.Graph, feat_data: np.ndarray, adj_list: np.ndarray,
g_feat_data: np.ndarray, g_adj_list: np.ndarray):
"""
:param g_nx:
:param feat_data:
:param adj_list:
:param g_feat_data:
:param g_adj_list:
:return:
"""
feat_data = np.concatenate([feat_data, g_feat_data], 0)
# adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(g_adj_list)))
adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(feat_data)))
g_edge_list = [[[k, v] for v in vs] for k, vs in g_adj_list.items()]
g_edge_list = [x for sublist in g_edge_list for x in sublist]
g_nx.add_edges_from(g_edge_list)
return g_nx, feat_data, adj_list | 1097becfe88f05008541aaa6c3c074fcd5c3149a | 3,786 |
def get_data_collector_instance(args, config):
"""Get the instance of the data
:param args: arguments of the script
:type args: Namespace
:raises NotImplementedError: no data collector implemented for given data source
:return: instance of the specific data collector
:rtype: subclass of BaseDataCollector
"""
if args.data_source == DATA_SOURCE_RSS:
return RssDataCollector(args.base_url, config[CONFIG_RSS_HEADER])
elif args.data_source == DATA_SOURCE_REDDIT:
return RedditDataCollector(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET)
elif args.data_source == DATA_SOURCE_TWITTER:
return TwitterDataCollector(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_BEARER_TOKEN)
else:
raise NotImplementedError | 75fda1231e1489da4b0c10473c9f657b143047c1 | 3,788 |
def timeIntegration(params):
"""Sets up the parameters for time integration
:param params: Parameter dictionary of the model
:type params: dict
:return: Integrated activity variables of the model
:rtype: (numpy.ndarray,)
"""
dt = params["dt"] # Time step for the Euler intergration (ms)
duration = params["duration"] # imulation duration (ms)
RNGseed = params["seed"] # seed for RNG
# ------------------------------------------------------------------------
# local parameters
# See Papadopoulos et al., Relations between large-scale brain connectivity and effects of regional stimulation
# depend on collective dynamical state, arXiv, 2020
tau_exc = params["tau_exc"] #
tau_inh = params["tau_inh"] #
c_excexc = params["c_excexc"] #
c_excinh = params["c_excinh"] #
c_inhexc = params["c_inhexc"] #
c_inhinh = params["c_inhinh"] #
a_exc = params["a_exc"] #
a_inh = params["a_inh"] #
mu_exc = params["mu_exc"] #
mu_inh = params["mu_inh"] #
# external input parameters:
# Parameter of the Ornstein-Uhlenbeck process for the external input(ms)
tau_ou = params["tau_ou"]
# Parameter of the Ornstein-Uhlenbeck (OU) process for the external input ( mV/ms/sqrt(ms) )
sigma_ou = params["sigma_ou"]
# Mean external excitatory input (OU process) (mV/ms)
exc_ou_mean = params["exc_ou_mean"]
# Mean external inhibitory input (OU process) (mV/ms)
inh_ou_mean = params["inh_ou_mean"]
# ------------------------------------------------------------------------
# global coupling parameters
# Connectivity matrix
# Interareal relative coupling strengths (values between 0 and 1), Cmat(i,j) connection from jth to ith
Cmat = params["Cmat"]
N = len(Cmat) # Number of nodes
K_gl = params["K_gl"] # global coupling strength
# Interareal connection delay
lengthMat = params["lengthMat"]
signalV = params["signalV"]
if N == 1:
Dmat = np.zeros((N, N))
else:
        # Interareal connection delays, Dmat(i,j) Connection from jth node to ith (ms)
Dmat = dp.computeDelayMatrix(lengthMat, signalV)
Dmat[np.eye(len(Dmat)) == 1] = np.zeros(len(Dmat))
Dmat_ndt = np.around(Dmat / dt).astype(int) # delay matrix in multiples of dt
params["Dmat_ndt"] = Dmat_ndt
# ------------------------------------------------------------------------
# Initialization
    # Floating point issue in np.arange() workaround: use integers in np.arange()
t = np.arange(1, round(duration, 6) / dt + 1) * dt # Time variable (ms)
sqrt_dt = np.sqrt(dt)
max_global_delay = np.max(Dmat_ndt)
startind = int(max_global_delay + 1) # timestep to start integration at
exc_ou = params["exc_ou"]
inh_ou = params["inh_ou"]
exc_ext = params["exc_ext"]
inh_ext = params["inh_ext"]
# state variable arrays, have length of t + startind
# they store initial conditions AND simulated data
excs = np.zeros((N, startind + len(t)))
inhs = np.zeros((N, startind + len(t)))
# ------------------------------------------------------------------------
# Set initial values
# if initial values are just a Nx1 array
if np.shape(params["exc_init"])[1] == 1:
exc_init = np.dot(params["exc_init"], np.ones((1, startind)))
inh_init = np.dot(params["inh_init"], np.ones((1, startind)))
# if initial values are a Nxt array
else:
exc_init = params["exc_init"][:, -startind:]
inh_init = params["inh_init"][:, -startind:]
# xsd = np.zeros((N,N)) # delayed activity
exc_input_d = np.zeros(N) # delayed input to x
inh_input_d = np.zeros(N) # delayed input to y
np.random.seed(RNGseed)
# Save the noise in the activity array to save memory
excs[:, startind:] = np.random.standard_normal((N, len(t)))
inhs[:, startind:] = np.random.standard_normal((N, len(t)))
excs[:, :startind] = exc_init
inhs[:, :startind] = inh_init
noise_exc = np.zeros((N,))
noise_inh = np.zeros((N,))
# ------------------------------------------------------------------------
return timeIntegration_njit_elementwise(
startind,
t,
dt,
sqrt_dt,
N,
Cmat,
K_gl,
Dmat_ndt,
excs,
inhs,
exc_input_d,
inh_input_d,
exc_ext,
inh_ext,
tau_exc,
tau_inh,
a_exc,
a_inh,
mu_exc,
mu_inh,
c_excexc,
c_excinh,
c_inhexc,
c_inhinh,
noise_exc,
noise_inh,
exc_ou,
inh_ou,
exc_ou_mean,
inh_ou_mean,
tau_ou,
sigma_ou,
) | 24d6702a92f82c6cc7fc1a337cd351b54c567e8b | 3,789 |
def is_role_user(session, user=None, group=None):
# type: (Session, User, Group) -> bool
"""
Takes in a User or a Group and returns a boolean indicating whether
that User/Group is a component of a service account.
Args:
session: the database session
user: a User object to check
group: a Group object to check
Throws:
AssertionError if neither a user nor a group is provided
Returns:
whether the User/Group is a component of a service account
"""
if user is not None:
return user.role_user
assert group is not None
user = User.get(session, name=group.groupname)
if not user:
return False
return user.role_user | 3d6b62b1708882b734031d737fa00f29ba9a9f95 | 3,790 |
def argCOM(y):
"""argCOM(y) returns the location of COM of y."""
idx = np.round(np.sum(y/np.sum(y)*np.arange(len(y))))
return int(idx) | 197ac25043b10575efb7405dba12c0d2e6f9976f | 3,791 |
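# A minimal usage sketch for argCOM(): for a symmetric profile the
# centre-of-mass index falls on the central bin. Values are made up.
import numpy as np

y = np.array([0.0, 1.0, 3.0, 1.0, 0.0])
idx = argCOM(y)  # weighted index sum is 2.0, which rounds to bin 2
assert idx == 2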
def fringe(z, z1, z2, rad, a1):
"""
Approximation to the longitudinal profile of a multipole from a permanent magnet assembly.
see Wan et al. 2018 for definition and Enge functions paper (Enge 1964)
"""
zz1 = (z - z1) / (2 * rad / pc.pi)
zz2 = (z - z2) / (2 * rad / pc.pi)
fout = ( (1 / ( 2 * np.tanh((z2 - z1) / (4 * rad / pc.pi)) ) )
* (np.tanh(zz1 + a1 * zz1**2 )
- np.tanh(zz2 - a1 * zz2**2) )
)
return fout | b1d0138937d1c622809d6f559f17430e89259fed | 3,792 |
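# A usage sketch for fringe(), assuming `pc` is a physical-constants module
# in which pc.pi is simply the circle constant (math.pi). The magnet
# geometry values below are made up for illustration.
import numpy as np

z = np.linspace(-0.1, 0.4, 200)              # longitudinal coordinate (m)
profile = fringe(z, z1=0.0, z2=0.3, rad=0.05, a1=0.0)
# profile is close to 1 well inside the magnet (0 < z < 0.3) and rolls off
# smoothly outside it over a length scale set by the bore radius `rad`.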
import random
def random_param_shift(vals, sigmas):
"""Add a random (normal) shift to a parameter set, for testing"""
assert len(vals) == len(sigmas)
shifts = [random.gauss(0, sd) for sd in sigmas]
newvals = [(x + y) for x, y in zip(vals, shifts)]
return newvals | 07430572c5051b7142499bcbdbc90de5abfcbd4d | 3,793 |
def compute_encrypted_request_hash(caller):
"""
This function will compute encrypted request Hash
:return: encrypted request hash
"""
first_string = get_parameter(caller.params_obj, "requesterNonce") or ""
worker_order_id = get_parameter(caller.params_obj, "workOrderId") or ""
worker_id = get_parameter(caller.params_obj, "workerId") or ""
workload_id = get_parameter(caller.params_obj, "workloadId") or ""
requester_id = get_parameter(caller.params_obj, "requesterId") or ""
requester_id = str(requester_id)
first_string += \
worker_order_id + worker_id + workload_id + requester_id
concat_hash = first_string.encode("UTF-8")
hash_1 = crypto_utils.compute_message_hash(concat_hash)
in_data = get_parameter(caller.params_obj, "inData")
out_data = get_parameter(caller.params_obj, "outData")
hash_2 = bytearray()
if in_data is not None:
hash_2 = compute_hash_string(in_data)
hash_3 = bytearray()
if out_data is not None:
hash_3 = compute_hash_string(out_data)
final_string = hash_1 + hash_2 + hash_3
caller.final_hash = crypto_utils.compute_message_hash(final_string)
encrypted_request_hash = crypto_utils.byte_array_to_hex(
crypto_utils.encrypt_data(
caller.final_hash, caller.session_key,
caller.session_iv))
return encrypted_request_hash | cf87c354df550b142030781e8b84ec1cb385489f | 3,794 |
def translate_line_test(string):
"""
Translates raw log line into sequence of integer representations for word tokens with sos and eos tokens.
:param string: Raw log line from auth_h.txt
:return: (list) Sequence of integer representations for word tokens with sos and eos tokens.
"""
data = string.split(",")
time = int(data[0]) # could be used to make categorical variables for day of week and time of day.
src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc = split_line(string)
src_user = lookup(src_user, word_token_inds, None)
src_domain = lookup(src_domain, word_token_inds, domain_counts)
if dst_user.startswith('U'):
dst_user = lookup(dst_user, word_token_inds, None)
else:
dst_user = lookup(dst_user, word_token_inds, pc_counts)
dst_domain = lookup(dst_domain, word_token_inds, domain_counts)
src_pc = lookup(src_pc, word_token_inds, pc_counts)
dst_pc = lookup(dst_pc, word_token_inds, pc_counts)
if data[5].startswith("MICROSOFT_AUTH"): # Deals with file corruption for this value.
data[5] = "MICROSOFT_AUTH"
auth_type = lookup(data[5], word_token_inds, None)
logon_type = lookup(data[6], word_token_inds, None)
auth_orient = lookup(data[7], word_token_inds, None)
success = lookup(data[8].strip(), word_token_inds, None)
return "%s %s %s %s %s %s %s %s %s %s %s %s\n" % (str(sos), src_user, src_domain, dst_user,
dst_domain, src_pc, dst_pc, auth_type,
logon_type, auth_orient, success, str(eos)) | d311eb9c6b398391724e868071d89f2f6c442912 | 3,795 |
def preprocess_signal(signal, sample_rate):
"""
Preprocess a signal for input into a model
Inputs:
signal: Numpy 1D array containing waveform to process
sample_rate: Sampling rate of the input signal
Returns:
spectrogram: STFT of the signal after resampling to 10kHz and adding
preemphasis.
X_in: Scaled STFT input feature for the model
"""
# Compute the spectrogram of the signal
spectrogram = make_stft_features(signal, sample_rate)
# Get the magnitude spectrogram
mag_spec = np.abs(spectrogram)
# Scale the magnitude spectrogram with a square root squashing, and percent
# normalization
X_in = np.sqrt(mag_spec)
m = X_in.min()
M = X_in.max()
X_in = (X_in - m)/(M - m)
return spectrogram, X_in | d2b6c5cb700ae877f7bf8bd4b5a772471e69a75d | 3,796 |
def get_frameheight():
"""return fixed height for extra panel
"""
return 120 | 3bd810eea77af15527d3c1df7ab0b788cfe90000 | 3,797 |
def default_heart_beat_interval() -> int:
"""
:return: in seconds
"""
return 60 | 58171c8fb5632aa2aa46de8138828cce2eaa4d33 | 3,798 |
import re
def email_valid(email):
"""test for valid email address
>>> email_valid('[email protected]')
True
>>> email_valid('test@@testco.com')
False
>>> email_valid('test@testco')
False
"""
if email == '':
return True
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
        r'\\[\001-\011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
r'+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
return bool(email_re.match(email)) | c76a621647595c741b1da71734a34372919e800f | 3,799 |
from typing import Any
def get_node_data(workspace: str, graph: str, table: str, node: str) -> Any:
"""Return the attributes associated with a node."""
return Workspace(workspace).graph(graph).node_attributes(table, node) | 0ac48d715fd31876b62d837d5b18b2ee75c791dd | 3,800 |
def siso_optional(fn, h_opt, scope=None, name=None):
"""Substitution module that determines to include or not the search
space returned by `fn`.
The hyperparameter takes boolean values (or equivalent integer zero and one
values). If the hyperparameter takes the value ``False``, the input is simply
put in the output. If the hyperparameter takes the value ``True``, the search
space is instantiated by calling `fn`, and the substitution module is
replaced by it.
Args:
fn (() -> (dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output])):
Function returning a graph fragment corresponding to a sub-search space.
h_opt (deep_architect.core.Hyperparameter): Hyperparameter for whether to
include the sub-search space or not.
scope (deep_architect.core.Scope, optional): Scope in which the module will be
registered. If none is given, uses the default scope.
name (str, optional): Name used to derive an unique name for the
module. If none is given, uses the class name to derive the name.
Returns:
(dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output]):
Tuple with dictionaries with the inputs and outputs of the
substitution module.
"""
def substitution_fn(dh):
return fn() if dh["opt"] else identity()
return substitution_module(_get_name(name, "SISOOptional"), substitution_fn,
{'opt': h_opt}, ['in'], ['out'], scope) | 187a292c8dba59d5d4d7f67d54cdd087ee2b6582 | 3,801 |
def saconv3x3_block(in_channels,
out_channels,
stride=1,
pad=1,
**kwargs):
"""
3x3 version of the Split-Attention convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
"""
return SAConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=stride,
pad=pad,
**kwargs) | bda938d53bbb56a7035ae50125743e4eb9aa709b | 3,802 |
def add_hook(**_kwargs):
"""Creates and adds the import hook in sys.meta_path"""
hook = import_hook.create_hook(
transform_source=transform_source,
hook_name=__name__,
extensions=[".pyfr"],
)
return hook | 20c7e37aead055e32bfcb520a579b66069a3e26c | 3,803 |
def mul(n1, n2):
"""
multiply two numbers
"""
return n1 * n2 | c137432dd2e5c6d4dbded08546e3d54b98fe03df | 3,804 |
import torch
from torchvision import transforms
def pytorch_normalze(img):
"""
https://github.com/pytorch/vision/issues/223
return appr -1~1 RGB
"""
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img = normalize(torch.from_numpy(img))
return img.numpy() | 7667d6fa3da69d89973bb804ad08a139ae7f3564 | 3,805 |
def get_nic_capacity(driver_info, ilo_fw):
"""Gets the FRU data to see if it is NIC data
Gets the FRU data in loop from 0-255 FRU Ids
and check if the returned data is NIC data. Couldn't
    find any easy way to detect if it is NIC data. We shouldn't be
hardcoding the FRU Id.
:param driver_info: Contains the access credentials to access
the BMC.
:param ilo_fw: a tuple containing major and minor versions of firmware
:returns: the max capacity supported by the NIC adapter.
"""
i = 0x0
value = None
ilo_fw_rev = get_ilo_version(ilo_fw) or DEFAULT_FW_REV
# Note(vmud213): iLO firmware versions >= 2.3 support reading the FRU
# information in a single call instead of iterating over each FRU id.
if ilo_fw_rev < MIN_SUGGESTED_FW_REV:
for i in range(0xff):
# Note(vmud213): We can discard FRU ID's between 0x6e and 0xee
# as they don't contain any NIC related information
if (i < 0x6e) or (i > 0xee):
cmd = "fru print %s" % hex(i)
out = _exec_ipmitool(driver_info, cmd)
if out and 'port' in out and 'Adapter' in out:
value = _parse_ipmi_nic_capacity(out)
if value is not None:
break
else:
continue
else:
cmd = "fru print"
out = _exec_ipmitool(driver_info, cmd)
if out:
for line in out.split('\n'):
if line and 'port' in line and 'Adapter' in line:
value = _parse_ipmi_nic_capacity(line)
if value is not None:
break
return value | cc20e1b35a47bec1242ed5dba60da8473527ca4f | 3,806 |
import re
def isValidInifileKeyName(key):
""" Check that this key name is valid to be used in inifiles, and to be used as a python property name on a q or i object """
return re.match("^[\w_]+$", key) | 9e68b987d6ac9af3c40e053c2347b01f737f0665 | 3,807 |
def installed_pkgs():
"""
Return the list of installed packages on the machine
Returns:
list: List of installed packages
CLI Example:
.. code-block:: bash
salt '*' macpackage.installed_pkgs
"""
cmd = "pkgutil --pkgs"
return __salt__["cmd.run"](cmd).split("\n") | b9a66600327ea8eb0ec63745cacd8509a0f757d9 | 3,808 |
import math
import librosa
import numpy as np
def extract_feature(audio, sr=44100):
"""
    extract the following features from the audio:
        sig: mean and std of the absolute signal
        rmse: mean and std of the root-mean-square energy
        silence: fraction of frames whose RMSE is below 40% of the mean RMSE
        harmonic: mean of the harmonic component (scaled by 1000)
        pitch: mean and std of the estimated fundamental frequency
    audio: audio file path or audio array
    return feature_list: np array of shape [n_samples, n_features]
"""
feature_list = []
y = []
if isinstance(audio, str):
        y, _ = librosa.load(audio, sr=sr)
elif isinstance(audio, np.ndarray):
y = audio
# 1. sig
sig_mean = np.mean(abs(y))
feature_list.append(sig_mean) # sig_mean
feature_list.append(np.std(y)) # sig_std
# 2. rmse
    rmse = librosa.feature.rms(y=y + 0.0001)[0]
feature_list.append(np.mean(rmse)) # rmse_mean
feature_list.append(np.std(rmse)) # rmse_std
# 3. silence
silence = 0
for e in rmse:
if e <= 0.4 * np.mean(rmse):
silence += 1
silence /= float(len(rmse))
feature_list.append(silence) # silence
# 4. harmonic
y_harmonic = librosa.effects.hpss(y)[0]
feature_list.append(np.mean(y_harmonic) * 1000) # harmonic (scaled by 1000)
# 5. pitch (instead of auto_correlation)
cl = 0.45 * sig_mean
center_clipped = []
for s in y:
if s >= cl:
center_clipped.append(s - cl)
elif s <= -cl:
center_clipped.append(s + cl)
elif np.abs(s) < cl:
center_clipped.append(0)
# auto_corrs = librosa.core.autocorrelate(np.array(center_clipped))
pitch, _, _ = librosa.pyin(y, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'))
pitch = [0 if math.isnan(p) else p for p in pitch]
feature_list.append(np.mean(pitch))
feature_list.append(np.std(pitch))
return np.array(feature_list).reshape(1, -1) | d4eca914605bc87c57dbaf846a9a01d79a953c56 | 3,809 |
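# A usage sketch for extract_feature(): it returns one row of eight features
# (sig mean/std, rmse mean/std, silence fraction, harmonic mean, pitch
# mean/std). The synthetic 220 Hz tone below is only for illustration.
import numpy as np

sr = 44100
t = np.linspace(0, 1.0, sr, endpoint=False)
tone = 0.1 * np.sin(2 * np.pi * 220.0 * t)
feats = extract_feature(tone, sr=sr)
assert feats.shape == (1, 8)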
from typing import OrderedDict
import six
def BuildPartialUpdate(clear, remove_keys, set_entries, field_mask_prefix,
entry_cls, env_builder):
"""Builds the field mask and patch environment for an environment update.
Follows the environments update semantic which applies operations
in an effective order of clear -> remove -> set.
Leading and trailing whitespace is stripped from elements in remove_keys
and the keys of set_entries.
Args:
clear: bool, If true, the patch removes existing keys.
remove_keys: iterable(string), Iterable of keys to remove.
set_entries: {string: string}, Dict containing entries to set.
field_mask_prefix: string, The prefix defining the path to the base of the
proto map to be patched.
entry_cls: AdditionalProperty, The AdditionalProperty class for the type
of entry being updated.
env_builder: [AdditionalProperty] -> Environment, A function which produces
a patch Environment with the given list of entry_cls properties.
Returns:
(string, Environment), a 2-tuple of the field mask defined by the arguments
and a patch environment produced by env_builder.
"""
remove_keys = set(k.strip() for k in remove_keys or [])
# set_entries is sorted by key to make it easier for tests to set the
# expected patch object.
set_entries = OrderedDict(
(k.strip(), v) for k, v in sorted(six.iteritems(set_entries or {})))
if clear:
entries = [
entry_cls(key=key, value=value)
for key, value in six.iteritems(set_entries)
]
return field_mask_prefix, env_builder(entries)
field_mask_entries = []
seen_keys = set()
for key in remove_keys:
field_mask_entries.append('{}.{}'.format(field_mask_prefix, key))
seen_keys.add(key)
entries = []
for key, value in six.iteritems(set_entries):
entries.append(entry_cls(key=key, value=value))
if key not in seen_keys:
field_mask_entries.append('{}.{}'.format(field_mask_prefix, key))
# Sorting field mask entries makes it easier for tests to set the expected
# field mask since dictionary iteration order is undefined.
field_mask_entries.sort()
return ','.join(field_mask_entries), env_builder(entries) | 320c589cd45dcec9a3ebba4b295075e23ef805ed | 3,811 |
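# A usage sketch for BuildPartialUpdate() with toy stand-ins for the proto
# types: a namedtuple plays the role of entry_cls and a plain list plays the
# role of the patch environment built by env_builder.
from collections import namedtuple

Entry = namedtuple("Entry", ["key", "value"])
mask, env = BuildPartialUpdate(
    clear=False,
    remove_keys=[" foo "],
    set_entries={"bar": "1"},
    field_mask_prefix="labels",
    entry_cls=Entry,
    env_builder=list,
)
# mask == "labels.bar,labels.foo"  (whitespace stripped, entries sorted)
# env  == [Entry(key="bar", value="1")]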
def create_schema_usb():
"""Create schema usb."""
return vol.Schema(CONFIG_SCHEMA_USB) | e543a5950788ad629ed3986cc7a6c5a58931a478 | 3,813 |
def _build_field_queries(filters):
"""
Builds field queries.
Same as _build_field_query but expects a dict of field/values and returns a list of queries.
"""
return [
_build_field_query(field, value)
for field, value in filters.items()
] | 9b1241cce6c421a79cd5ea26dd134d5fd93d6fde | 3,814 |
def bycode(ent, group):
"""
Get the data with the given group code from an entity.
Arguments:
ent: An iterable of (group, data) tuples.
group: Group code that you want to retrieve.
Returns:
The data for the given group code. Can be a list of items if the group
code occurs multiple times.
"""
data = [v for k, v in ent if k == group]
if len(data) == 1:
return data[0]
return data | c5b92f2bbd1cd5bc383a1102ccf54031222d82c3 | 3,815 |
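# A usage sketch for bycode(): a group code that occurs once returns a single
# value, a repeated group code returns a list. The entity tuples below are
# made-up DXF-style (group, data) pairs.
ent = [(0, "LINE"), (8, "LAYER1"), (10, 1.0), (10, 2.0)]
assert bycode(ent, 8) == "LAYER1"
assert bycode(ent, 10) == [1.0, 2.0]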
from typing import List
from typing import Tuple
def get_midi_programs(midi: MidiFile) -> List[Tuple[int, bool]]:
""" Returns the list of programs of the tracks of a MIDI, deeping the
same order. It returns it as a list of tuples (program, is_drum).
:param midi: the MIDI object to extract tracks programs
:return: the list of track programs, as a list of tuples (program, is_drum)
"""
return [(int(track.program), track.is_drum) for track in midi.instruments] | 7249baa46b80b8b42400068edacf5ce9e829c71f | 3,817 |
def is_depth_wise_conv(module):
    """Return True if the module is a depthwise convolution (grouped conv with groups != 1 and in_channels == out_channels)."""
    if hasattr(module, "groups"):
        return module.groups != 1 and module.in_channels == module.out_channels
    elif hasattr(module, "group"):
        return module.group != 1 and module.in_channels == module.out_channels
    return False | 27127f54edbf8d0653cab6c7dbfb1448f33ecab4 | 3,818
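# A usage sketch for is_depth_wise_conv(), assuming PyTorch modules: a grouped
# convolution with groups == in_channels == out_channels counts as depthwise,
# a standard convolution does not.
import torch.nn as nn

depthwise = nn.Conv2d(8, 8, kernel_size=3, groups=8)
standard = nn.Conv2d(8, 16, kernel_size=3)
assert is_depth_wise_conv(depthwise)
assert not is_depth_wise_conv(standard)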
def list_all_routed():
"""
List all the notifications that have been routed to any repository, limited by the parameters supplied
in the URL.
See the API documentation for more details.
:return: a list of notifications appropriate to the parameters
"""
return _list_request() | d67141d6fa5908d99292d898a5a77df3e80d47aa | 3,819 |
def Lstart(gridname='BLANK', tag='BLANK', ex_name='BLANK'):
"""
This adds more run-specific entries to Ldir.
"""
# put top level information from input into a dict
Ldir['gridname'] = gridname
Ldir['tag'] = tag
Ldir['ex_name'] = ex_name
# and add a few more things
Ldir['gtag'] = gridname + '_' + tag
Ldir['gtagex'] = gridname + '_' + tag + '_' + ex_name
Ldir['grid'] = Ldir['data'] / 'grids' / gridname
Ldir['forecast_days'] = 3
Ldir['ds_fmt'] = ds_fmt
Ldir['roms_time_units'] = roms_time_units
Ldir['modtime0'] = modtime0
return Ldir.copy()
    # the use of copy() means that different calls to Lstart (e.g. when
    # importing plotting_functions) do not overwrite each other | 92d992c3a7eba7bbba9146018060bca7844d4a78 | 3,822
def rfe_w2(x, y, p, classifier):
"""RFE algorithm, where the ranking criteria is w^2,
described in [Guyon02]_. `classifier` must be an linear classifier
with learn() and w() methods.
.. [Guyon02] I Guyon, J Weston, S Barnhill and V Vapnik. Gene Selection for Cancer Classification using Support Vector Machines. Machine Learning, 2002.
:Parameters:
x: 2d array_like object (N,P)
training data
y : 1d array_like object integer (N)
class labels (only two classes)
p : float [0.0, 1.0]
percentage of features (upper rounded) to remove
at each iteration (p=0 one variable)
classifier : object with learn() and w() methods
object
:Returns:
ranking : 1d numpy array int
feature ranking. ranking[i] contains the feature index ranked
in i-th position.
"""
if (p < 0.0) or (p > 1.0):
raise ValueError("parameter p must be in [0.0, 1.0]")
if not (hasattr(classifier, 'learn') and hasattr(classifier, 'w')):
raise ValueError("parameter classifier must have learn() and w() methods")
    xarr = np.asarray(x, dtype=float)
    yarr = np.asarray(y, dtype=int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
labels = np.unique(yarr)
if labels.shape[0] != 2:
raise ValueError("number of classes must be = 2")
    idxglobal = np.arange(xarr.shape[1], dtype=int)
ranking = []
while True:
nelim = np.max((int(np.ceil(idxglobal.shape[0] * p)), 1))
xi = xarr[:, idxglobal]
classifier.learn(xi, yarr)
w = classifier.w()
idxsorted = np.argsort(w**2)
# indexes to remove
idxelim = idxglobal[idxsorted[:nelim]][::-1]
ranking.insert(0, idxelim)
# update idxglobal
idxglobal = idxglobal[idxsorted[nelim:]]
idxglobal.sort()
if len(idxglobal) <= 1:
ranking.insert(0, idxglobal)
break
return np.concatenate(ranking) | 9176ee36c1180ab862b23be9d9a09584abea50ca | 3,823 |
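# A usage sketch for rfe_w2() with a minimal adapter around scikit-learn's
# LinearSVC, which provides the learn()/w() interface the function expects.
# The toy data below is synthetic; only feature 2 is informative.
import numpy as np
from sklearn.svm import LinearSVC

class LinearSVCWrapper:
    """Thin adapter exposing learn() and w() over sklearn's LinearSVC."""
    def __init__(self):
        self._clf = LinearSVC()
    def learn(self, x, y):
        self._clf.fit(x, y)
    def w(self):
        return self._clf.coef_[0]

rng = np.random.RandomState(0)
x = rng.randn(40, 5)
y = (x[:, 2] > 0).astype(int)
ranking = rfe_w2(x, y, p=0.0, classifier=LinearSVCWrapper())
# ranking[0] is expected to be 2, the index of the informative feature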
from typing import List
def compress_timeline(timeline: List, salt: bytes) -> List:
"""
Compress the verbose Twitter feed into a small one. Just keep the useful elements.
The images are downloaded per-request.
Args:
timeline (List): The Twitter timeline.
salt (bytes): The salt to apply on the filename.
Returns:
List: The timeline with less information and links to the (locally) stored images.
"""
compressed_timeline = []
for tweet in timeline:
profile_image_url = tweet["user"]["profile_image_url_https"]
compressed_tweet = {
"created_at": tweet["created_at"],
"text": tweet["text"],
"id_str": tweet["id_str"],
"user": {
"name": tweet["user"]["name"],
"screen_name": tweet["user"]["screen_name"],
"profile_image_origin": encode_media_origin(profile_image_url),
"profile_image_filename": create_media_filename(
profile_image_url, salt
),
},
}
if tweet["retweeted"]:
original_source = tweet["retweeted_status"]["user"]
profile_image_url = original_source["profile_image_url_https"]
compressed_tweet["retweeted_status"] = {
"user": {
"name": original_source["name"],
"screen_name": original_source["screen_name"],
"profile_image_origin": encode_media_origin(profile_image_url),
"profile_image_filename": create_media_filename(
profile_image_url, salt
),
}
}
compressed_timeline.append(compressed_tweet)
return compressed_timeline | aff1364714d7e83685ab2257167fcd8bc7e10436 | 3,824 |
def createFinalCompactedData(compacted_data,elevations):
"""
This function creates a dataframe that combines the RGB data and the elevations data
into a dataframe that can be used for analysis
Parameters
----------
compacted_data : list of compacted data returned from condensePixels.
elevations : list of elevations from getUSGSElevations.
Returns
-------
final_compacted_data : dataframe of merged data.
"""
lats = []
lons = []
reds = []
greens = []
blues = []
els = []
for i in range(len(compacted_data)):
for j in range(len(compacted_data[0])):
reds.append(compacted_data[i][j][0])
greens.append(compacted_data[i][j][1])
blues.append(compacted_data[i][j][2])
lats.append(compacted_data[i][j][3])
lons.append(compacted_data[i][j][4])
els.append(elevations[i][j])
final_compacted_data = pd.DataFrame({'Lat':lats,'Lon':lons,'Elevation':els,'Red':reds,'Green':greens,'Blue':blues})
return final_compacted_data | 0d8b6a5e10504c32988e05e7450ebcf077305949 | 3,825 |
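# A usage sketch for createFinalCompactedData(): each inner element of
# compacted_data is assumed to be (R, G, B, lat, lon) and elevations is the
# parallel grid of elevation values. All numbers below are made up.
import pandas as pd  # used inside createFinalCompactedData

compacted_data = [
    [(120, 200, 90, 40.01, -105.25), (130, 210, 95, 40.01, -105.24)],
    [(110, 190, 85, 40.02, -105.25), (125, 205, 92, 40.02, -105.24)],
]
elevations = [
    [1655.0, 1660.5],
    [1652.3, 1658.1],
]
df = createFinalCompactedData(compacted_data, elevations)
# df has 4 rows and columns Lat, Lon, Elevation, Red, Green, Blue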
def get_sorted_nodes_edges(bpmn_graph):
"""
    Ensure an ordering that is as constant as possible
Parameters
--------------
bpmn_graph
BPMN graph
Returns
--------------
nodes
List of nodes of the BPMN graph
edges
List of edges of the BPMN graph
"""
graph = bpmn_graph.get_graph()
graph_nodes = list(graph.nodes(data=False))
graph_edges = list(graph.edges(data=False))
bfs = bfs_bpmn(graph_nodes, graph_edges)
graph_nodes = sort_nodes_given_bfs(graph_nodes, bfs)
graph_edges = sort_edges_given_bfs(graph_edges, bfs)
return graph_nodes, graph_edges | 879d7e8e3e5e4e9a8db3fc01622b96dde2b7af25 | 3,826 |
from typing import Optional
from typing import Dict
from typing import Any
def list_commits(
access_key: str,
url: str,
owner: str,
dataset: str,
*,
revision: Optional[str] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
) -> Dict[str, Any]:
"""Execute the OpenAPI `GET /v2/datasets/{owner}/{dataset}/commits`.
Arguments:
access_key: User's access key.
url: The URL of the graviti website.
owner: The owner of the dataset.
dataset: Name of the dataset, unique for a user.
revision: The information to locate the specific commit, which can be the commit id,
the branch name, or the tag name.
offset: The offset of the page. The default value of this param in OpenAPIv2 is 0.
limit: The limit of the page. The default value of this param in OpenAPIv2 is 24.
Returns:
The response of OpenAPI.
Examples:
>>> list_commits(
... "ACCESSKEY-********",
... "https://api.graviti.com",
... "czhual",
... "MNIST",
... )
{
"commits": [
{
"commit_id": "85c57a7f03804ccc906632248dc8c359",
"parent_commitId": "784ba0d3bf0a41f6a7bfd771d8c00fcb",
"title": "upload data",
"description": "",
"committer": "Gravitier",
"committed_at": "2021-03-03T18:58:10Z"
}
],
"offset": 0,
"record_size": 1,
"total_count": 1
}
"""
url = f"{url}/v2/datasets/{owner}/{dataset}/commits"
params: Dict[str, Any] = {}
if offset is not None:
params["offset"] = offset
if limit is not None:
params["limit"] = limit
if revision is not None:
params["revision"] = revision
return open_api_do("GET", access_key, url, params=params).json() | be3899be0b77de069c7d32ca39aaec2039fe89e4 | 3,827 |
import heapq
def dijkstra(graph, start, end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Original by
David Eppstein, UC Irvine, 4 April 2002
http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
>>> G = DirectedGraph({'s':{'u':10, 'x':5}, 'u':{'v':1, 'x':2}, 'v':{'y':4}, 'x':{'u':3, 'v':9, 'y':2}, \
'y':{'s':7, 'v':6}})
>>> distances, predecessors = dijkstra(G, 's', 'v')
>>> sorted(distances.items())
[('s', 14), ('u', 8), ('v', 9), ('x', 5), ('y', 7)]
>>> sorted(predecessors.items())
[('s', 'y'), ('u', 'x'), ('v', 'u'), ('x', 's'), ('y', 'x')]
"""
distances = {} # dictionary of final distances
predecessors = {} # dictionary of predecessors (previous node)
queue = [] # queue
heapq.heappush(queue, (0, start))
while len(queue) > 0:
distance, node = heapq.heappop(queue)
if node in distances and distance > distances[node]:
continue
if node == end:
break
# Loop through neighbours
edges = graph.edges(node, distance=distance)
for neighbour, length in edges.items():
total = distance + length
if neighbour in distances:
if total >= distances[neighbour]:
continue
distances[neighbour] = total
predecessors[neighbour] = node
heapq.heappush(queue, (total, neighbour))
return distances, predecessors | b2a1ee983534c0a4af36ae7e3490c3b66949609b | 3,828 |
def tournament_selection(pop, size):
""" tournament selection
individual eliminate one another until desired breeding size is reached
"""
participants = [ind for ind in pop.population]
breeding = []
# could implement different rounds here
    # but I think that's almost the same as calling the tournament multiple times with smaller sizes
for i in range(size):
a, b = rng.choice(participants, 2)
if a > b:
breeding.append(a)
participants.remove(a)
else:
breeding.append(b)
participants.remove(b)
return breeding | 78bebc2de25d0744f3f8dabd67f70136d5f020b5 | 3,830 |
import math
def bond_number(r_max, sigma, rho_l, g):
""" calculates the Bond number for the largest droplet according to
Cha, H.; Vahabi, H.; Wu, A.; Chavan, S.; Kim, M.-K.; Sett, S.; Bosch, S. A.; Wang, W.; Kota, A. K.; Miljkovic, N.
Dropwise Condensation on Solid Hydrophilic Surfaces. Science Advances 2020, 6 (2), eaax0746.
https://doi.org/10.1126/sciadv.aax0746"""
l_y = math.sqrt(sigma / (rho_l*g))
bond = r_max**2 / l_y**2
return bond | 2098a762dd7c2e80ff4a570304acf7cfbdbba2e5 | 3,831 |
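# A worked example for bond_number(): a 1 mm water droplet at room
# temperature. Property values are approximate and only for illustration.
sigma = 0.072      # surface tension of water (N/m)
rho_l = 998.0      # liquid density (kg/m^3)
g = 9.81           # gravitational acceleration (m/s^2)
r_max = 1.0e-3     # largest droplet radius (m)
bo = bond_number(r_max, sigma, rho_l, g)
# capillary length l_y ~ 2.7 mm, so bo ~ 0.14 (surface tension dominates gravity)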
def spatial_conv(inputs,
conv_type,
kernel,
filters,
stride,
is_training,
activation_fn='relu',
data_format='channels_last'):
"""Performs 1x1 conv followed by 2d or depthwise conv.
Args:
inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only
supports 'channels_last' as the data format.
conv_type: 'string' of "std", "depth", "maxpool", or "avgpool" this selects
the spatial conv/pooling method.
kernel: `int` kernel size to be used for `conv2d` or max_pool2d` operations.
Should be a positive integer.
filters: `int` number of filters in the convolution.
stride: 'int' temporal stride
is_training: 'bool' specifying whether in training mode or not.
activation_fn: 'string' the activation function to use (relu or swish)
data_format: `str`. Only supports 'channels_last' as the data format.
Returns:
A `Tensor` of the same data_format
"""
if kernel == 1:
return inputs
use_relu = (activation_fn == 'relu')
if conv_type == 'std' or conv_type == 'depth':
inputs = conv2d(inputs, 1, filters, 1, is_training, use_relu=use_relu)
if not use_relu:
inputs = hard_swish(inputs)
if conv_type == 'std' or conv_type == '1std':
inputs = conv2d(inputs, int(kernel), filters, int(stride), is_training,
use_relu=use_relu)
if not use_relu:
inputs = hard_swish(inputs)
elif conv_type == 'depth':
depth_multiplier = 1
depthwise_kernel_shape = (int(kernel), int(kernel), inputs.shape[-1],
depth_multiplier)
depthwise_kernel = contrib_framework.model_variable(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
dtype=tf.float32,
initializer=contrib_layers.variance_scaling_initializer(
factor=2.0, mode='FAN_IN', uniform=False),
trainable=True)
inputs = tf.nn.depthwise_conv2d(
inputs,
tf.cast(depthwise_kernel, inputs.dtype),
strides=[1, int(stride), int(stride), 1],
padding='SAME',
rate=[1, 1],
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
inputs = bn.batch_norm_relu(
inputs,
is_training,
relu=use_relu,
data_format=data_format)
if not use_relu:
inputs = hard_swish(inputs)
elif conv_type == 'maxpool':
inputs = tf.layers.max_pooling2d(
inputs,
int(kernel),
int(stride),
padding='same',
data_format=data_format)
elif conv_type == 'avgpool':
inputs = tf.layers.average_pooling2d(
inputs,
int(kernel),
int(stride),
padding='same',
data_format=data_format)
return inputs | e87820eaa5b8ed13157fe0790c4e09b1bc546a0d | 3,832 |
async def timeron(websocket, battleID):
"""Start the timer on a Metronome Battle.
"""
return await websocket.send(f'{battleID}|/timer on') | f1601694e2c37d41adcc3983aa535347dc13db71 | 3,833 |
import numpy
def to_unit_vector(this_vector):
""" Convert a numpy vector to a unit vector
Arguments:
this_vector: a (3,) numpy array
Returns:
new_vector: a (3,) array with the same direction but unit length
"""
norm = numpy.linalg.norm(this_vector)
assert norm > 0.0, "vector norm must be greater than 0"
if norm:
return this_vector/numpy.linalg.norm(this_vector)
else:
return this_vector | ae46bf536b8a67a1be1e98ae051eebf1f8696e37 | 3,834 |
import base64
def decode(msg):
""" Convert data per pubsub protocol / data format
Args:
msg: The msg from Google Cloud
Returns:
data: The msg data as a string
"""
if 'data' in msg:
data = base64.b64decode(msg['data']).decode('utf-8')
return data | 32e85b3f0c18f3d15ecb0779825941024da75909 | 3,835 |
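# A usage sketch for decode(): a Pub/Sub-style message carrying a
# base64-encoded payload. The payload string is made up.
import base64

msg = {"data": base64.b64encode(b"sensor reading: 42").decode("utf-8")}
assert decode(msg) == "sensor reading: 42"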
def pivot_longer_by_humidity_and_temperature(df: pd.DataFrame) -> pd.DataFrame:
"""
Reshapes the dataframe by collapsing all of the temperature and humidity
columns into an temperature, humidity, and location column
Parameters
----------
df : pd.DataFrame
The cleaned and renamed dataframe from add_date_features().
Returns
-------
pd.DataFrame
A much longer dataframe with an exposed location column
to perform operations on.
"""
# Need to melt both variables individually, which creates
# a ton of meaningless rows in the second melt.
temporary_df = df.melt(
id_vars=[colname for colname in df.columns if "temp" not in colname],
var_name="temperature_location",
value_name="temperature",
ignore_index=False,
)
temporary_df = temporary_df.melt(
id_vars=[
colname for colname in temporary_df.columns if "humidity" not in colname
],
var_name="humidity_location",
value_name="humidity",
ignore_index=False,
)
temporary_df["temperature_location"] = temporary_df[
"temperature_location"
].str.replace("temperature_", "")
temporary_df["humidity_location"] = temporary_df["humidity_location"].str.replace(
"humidity_", ""
)
# We know all measurements come from slices of time that contain a measurement of both humidity
# and temperature from one location, so if we combine the location columns we can drop
# the extra rows created during the second melt.
df = temporary_df[
temporary_df["temperature_location"] == temporary_df["humidity_location"]
]
df = df.drop(columns=["humidity_location"]).rename(
columns={"temperature_location": "measurement_location"}
)
return df | d60b92b523c31b3f7db799f58a42bd9ca810d258 | 3,836 |
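# A usage sketch for pivot_longer_by_humidity_and_temperature(): the wide
# sensor columns below (temperature_*/humidity_* per location) are
# hypothetical, chosen to match the column-name pattern the function expects.
import pandas as pd

wide = pd.DataFrame({
    "timestamp": ["2021-01-01 00:00", "2021-01-01 01:00"],
    "temperature_indoor": [21.0, 20.5],
    "temperature_outdoor": [3.0, 2.5],
    "humidity_indoor": [40.0, 41.0],
    "humidity_outdoor": [80.0, 82.0],
})
tidy = pivot_longer_by_humidity_and_temperature(wide)
# 4 rows: one per (timestamp, measurement_location) pair, with columns
# timestamp, measurement_location, temperature, humidity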