content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
from osgeo import ogr
# KnownUnknown is an error class supplied by the surrounding project (not imported here).
def _feature_properties(feature, layer_definition, whitelist=None, skip_empty_fields=False):
""" Returns a dictionary of feature properties for a feature in a layer.
Third argument is an optional list or dictionary of properties to
whitelist by case-sensitive name - leave it None to include everything.
A dictionary will cause property names to be re-mapped.
OGR property types:
OFTInteger (0), OFTIntegerList (1), OFTReal (2), OFTRealList (3),
OFTString (4), OFTStringList (5), OFTWideString (6), OFTWideStringList (7),
OFTBinary (8), OFTDate (9), OFTTime (10), OFTDateTime (11).
Extra OGR types for GDAL 2.x:
OFTInteger64 (12), OFTInteger64List (13)
"""
properties = {}
okay_types = [ogr.OFTInteger, ogr.OFTReal, ogr.OFTString,
ogr.OFTWideString, ogr.OFTDate, ogr.OFTTime, ogr.OFTDateTime]
if hasattr(ogr, 'OFTInteger64'):
okay_types.extend([ogr.OFTInteger64, ogr.OFTInteger64List])
for index in range(layer_definition.GetFieldCount()):
field_definition = layer_definition.GetFieldDefn(index)
field_type = field_definition.GetType()
name = field_definition.GetNameRef()
if type(whitelist) in (list, dict) and name not in whitelist:
continue
if field_type not in okay_types:
try:
name = [oft for oft in dir(ogr) if oft.startswith('OFT') and getattr(ogr, oft) == field_type][0]
except IndexError:
raise KnownUnknown("Found an OGR field type I've never even seen: %d" % field_type)
else:
raise KnownUnknown("Found an OGR field type I don't know what to do with: ogr.%s" % name)
if not skip_empty_fields or feature.IsFieldSet(name):
property = type(whitelist) is dict and whitelist[name] or name
properties[property] = feature.GetField(name)
return properties | 482e42a9f4761cd0273dfb4e5f70bdb55ce168d9 | 3,650,900 |
import sublime
def reverse_search(view, what, start=0, end=-1, flags=0):
"""Do binary search to find `what` walking backwards in the buffer.
"""
if end == -1:
end = view.size()
end = find_eol(view, view.line(end).a)
last_match = None
lo, hi = start, end
while True:
        middle = (lo + hi) // 2  # integer midpoint; plain / would yield a float position
line = view.line(middle)
middle, eol = find_bol(view, line.a), find_eol(view, line.a)
if search_in_range(view, what, middle, hi, flags):
lo = middle
elif search_in_range(view, what, lo, middle - 1, flags):
            hi = middle - 1
else:
return calculate_relative_ref(view, '.')
# Don't search forever the same line.
if last_match and line.contains(last_match):
match = find_last_match(view, what, lo, hi, flags=flags)
return view.rowcol(match.begin())[0] + 1
last_match = sublime.Region(line.begin(), line.end()) | 7b8d95a987b9b986fb0e334cf3a9bc74014be67d | 3,650,901 |
def formatLookupLigatureSubstitution(lookup, lookupList, makeName=makeName):
""" GSUB LookupType 4 """
# substitute <glyph sequence> by <glyph>;
# <glyph sequence> must contain two or more of <glyph|glyphclass>. For example:
# substitute [one one.oldstyle] [slash fraction] [two two.oldstyle] by onehalf;
lines = list(filter(None, [ formatLookupflag(lookup, makeName=makeName) ])) \
+ ['sub {0} {1} by {2};'.format(first, ' '.join(lig.Component), lig.LigGlyph)
for sub in lookup.SubTable
for first, ligatures in sub.ligatures.items()
for lig in ligatures]
return (True, lines) | 3804d7c38564459b6f0cf19cbbac5e96642e61a2 | 3,650,902 |
import pathlib
import pandas as pd
def convert_raw2nc(path2rawfolder = '/nfs/grad/gradobs/raw/mlo/2020/', path2netcdf = '/mnt/telg/data/baseline/mlo/2020/',
                   database = None,
start_date = '2020-02-06',
pattern = '*sp02.*',
sernos = [1032, 1046],
site = 'mlo',
overwrite = False,
verbose = False,
raise_error = True,
test = False):
"""
Parameters
----------
    path2rawfolder : str or pathlib.Path, optional
        Folder containing the raw sp02 files. The default is '/nfs/grad/gradobs/raw/mlo/2020/'.
    path2netcdf : str or pathlib.Path, optional
        Output folder for the generated netCDF files. The default is '/mnt/telg/data/baseline/mlo/2020/'.
    database : optional
        Instrument database passed through to `read_file`. The default is None.
    start_date : str, optional
        Files dated before this are skipped. The default is '2020-02-06'.
pattern : str, optional
Only files with this pattern are considered. In newer raw data
versions this would be '*sp02.*'. In older ones: 'MLOD*'
    sernos : list of int, optional
        Instrument serial numbers. The default is [1032, 1046].
    overwrite : bool, optional
        The default is False.
    verbose : bool, optional
        The default is False.
    raise_error : bool, optional
        If False, files for which `read_file` raises an IndexError are skipped. The default is True.
test : TYPE, optional
If True only one file is processed. The default is False.
Returns
-------
None.
"""
# lines = get_lines_from_station_header()
path2rawfolder = pathlib.Path(path2rawfolder)
path2netcdf = pathlib.Path(path2netcdf)
try:
path2netcdf.mkdir(exist_ok=True)
except FileNotFoundError:
path2netcdf.parent.mkdir()
path2netcdf.mkdir()
file_list = list(path2rawfolder.glob(pattern))
# print(len(file_list))
# file_contents = []
# return file_list
df_in = pd.DataFrame(file_list, columns=['path_in'])
# test what format, old or new.
p2f = file_list[0]
nl = p2f.name.split('.')
if len(nl) == 2:
# old format like /nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113
# get year from path
def path2date(path2file):
year = path2file.parent.parent.name
jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0])))
date = pd.to_datetime(year) + pd.to_timedelta(jul-1, 'd')
return date
# df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(year) + pd.to_timedelta((int(''.join(filter(str.isdigit, x.name.split('.')[0]))))-1, 'd'))
else:
# new format: gradobs.mlo-sp02.20200126.raw.dat
# df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(x.name.split('.')[2]))
path2date = lambda x: pd.to_datetime(x.name.split('.')[2])
# set index based on format
df_in.index = df_in.path_in.apply(path2date)
df_in.sort_index(inplace=True)
df_in = df_in.truncate(before=start_date)
df_out = pd.DataFrame(columns=['path_out'])
# generate output path
for sn in sernos:
for idx, row in df_in.iterrows():
# fnnc = row.path_in.name.replace('.dat','.nc')
# fnnc = fnnc.replace('-sp02', '.sp02')
# fnns = fnnc.split('.')
# fnns = fnns[:3] + [f'sn{str(sn)}'] + fnns[3:]
# fnnc = '.'.join(fnns)
# path2netcdf_file = path2netcdf.joinpath(fnnc)
date = idx
fnnc = f'gradobs.mlo.sp02.sn{sn}.{date.year}{date.month:02d}{date.day:02d}.raw.nc'
path2netcdf_file = path2netcdf.joinpath(fnnc)
df_add = pd.DataFrame({'path_in': row.path_in, 'path_out':path2netcdf_file}, index = [idx]
# ignore_index=True
)
            # DataFrame.append was removed in pandas 2.0; concat is the drop-in replacement
            df_out = pd.concat([df_out, df_add])
# check if file exists. Process only those that do not exist
df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
df_work = df_out[~df_out.exists]
# return df_work
### bsts
work_array = df_work.path_in.unique()
print(f'No of files that need to be processed: {len(work_array)}')
# exists = 0
# new = 0
for e, file in enumerate(work_array):
# if e == 3: break
# ds = read_file(file, lines)
df_sel = df_work[df_work.path_in == file]
try:
dslist = read_file(file, database = database, site = site)
except IndexError:
if raise_error:
raise
else:
print('Instrument not installed ... skip', end = '...')
if test:
return {'file': file, 'database': database}
else:
continue
### generate output file name
# processing
for ds in dslist:
# fnnc = file.name.replace('.dat','.nc')
# fnnc = fnnc.replace('-sp02', '.sp02')
# fnns = fnnc.split('.')
# fnns = fnns[:3] + [f'sn{str(ds.serial_no.values)}'] + fnns[3:]
# fnnc = '.'.join(fnns)
# path2netcdf_file = path2netcdf.joinpath(fnnc)
# check which of the output files is the right ... still, i am not convinced this is the most elegant way to do this.... add the lineno in the work table?
sn = str(ds.serial_no.values)
try:
path2netcdf_file = [p2fo for p2fo in df_sel.path_out.values if sn in p2fo.name][0]
except IndexError:
assert(False), 'This Error is usually caused because one of the netcdf files (for a serial number) is deleted, but not the other.'
# save to file
ds.to_netcdf(path2netcdf_file)
if test:
break
# out = dict(processed = new,
# skipped = exists,
# last_ds_list = dslist)
if not test:
df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
df_work = df_out[~df_out.exists]
work_array = df_work.path_in.unique()
assert(df_work.shape[0] == 0), f'df_work should be empty at the end. Still has {df_work.shape[0]} entries.'
return | 16313b1a7abc05fac469d9a0c5003eebb7ef2a8c | 3,650,903 |
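For the old file-name format handled above, the measurement date is reconstructed from the year folder and the Julian day encoded in the file stem. A self-contained sketch of that mapping (the example path is hypothetical):

```python
import pathlib
import pandas as pd

def old_name_to_date(path2file: pathlib.Path) -> pd.Timestamp:
    year = path2file.parent.parent.name                        # e.g. ".../2013/sp02/..."
    jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0])))
    return pd.to_datetime(year) + pd.to_timedelta(jul - 1, 'd')

print(old_name_to_date(pathlib.Path('/nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113')))
# 2013-01-13 00:00:00  (year 2013, day-of-year 13)
```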
import requests
def get_curricula(course_url, year):
"""Encodes the available curricula for a given course in a given year in a vaguely sane format
Dictionary fields:
- constant.CODEFLD: curriculum code as used in JSON requests
- constant.NAMEFLD: human-readable curriculum name"""
curricula = []
curricula_req_url = constant.CURRICULAURLFORMAT[get_course_lang(course_url)].format(course_url, year)
for curr in requests.get(curricula_req_url).json():
curricula.append({constant.CODEFLD: curr[constant.CURRVAL], constant.NAMEFLD: curr[constant.CURRNAME]})
return curricula | 878f2a54e41624887aed720de52dea15bdbf6528 | 3,650,904 |
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False) | 4e53568bb4bf88998020b0804770895b67e9e018 | 3,650,905 |
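Because kernel_size=3, stride=1 and padding=1 preserve the spatial size, this helper is the usual ResNet-style building block. A quick shape check, assuming PyTorch is available:

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
x = torch.randn(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 16, 32, 32]) -- height and width unchanged
```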
import pandas as pd
import stewi
def extract_facility_data(inventory_dict):
"""
Returns df of facilities from each inventory in inventory_dict,
including FIPS code
:param inventory_dict: a dictionary of inventory types and years (e.g.,
{'NEI':'2017', 'TRI':'2017'})
:return: df
"""
facility_mapping = pd.DataFrame()
# load facility data from stewi output directory, keeping only the facility IDs,
# and geographic information
inventory_list = list(inventory_dict.keys())
for i in range(len(inventory_dict)):
# define inventory name as inventory type + inventory year (e.g., NEI_2017)
database = inventory_list[i]
year = list(inventory_dict.values())[i]
inventory_name = database + '_' + year
facilities = stewi.getInventoryFacilities(database, year)
facilities = facilities[['FacilityID', 'State', 'County', 'NAICS']]
if len(facilities[facilities.duplicated(subset='FacilityID', keep=False)]) > 0:
log.debug('Duplicate facilities in %s - keeping first listed', inventory_name)
facilities.drop_duplicates(subset='FacilityID',
keep='first', inplace=True)
        # DataFrame.append was removed in pandas 2.0; concat is the drop-in replacement
        facility_mapping = pd.concat([facility_mapping, facilities])
# Apply FIPS to facility locations
facility_mapping = apply_county_FIPS(facility_mapping)
return facility_mapping | b0298b915c0511841d607f7d88a90d64c7e9da59 | 3,650,906 |
import numpy
def zeros(shape, dtype=None):
"""
Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
"""
if dtype is None:
dtype = config.floatX
return alloc(numpy.array(0, dtype=dtype), *shape) | 9d1d70f59b585d06623d41c18acc67ec16572307 | 3,650,907 |
from typing import Optional
import types
import numpy
from typing import cast
def station_location_from_rinex(rinex_path: str) -> Optional[types.ECEF_XYZ]:
"""
Opens a RINEX file and looks in the headers for the station's position
Args:
rinex_path: the path to the rinex file
Returns:
XYZ ECEF coords in meters for the approximate receiver location
        approximate, meaning it may be off by a meter or so
or None if ECEF coords could not be found
"""
xyz = None
lat = None
lon = None
height = None
with open(rinex_path, "rb") as filedat:
for _ in range(50):
linedat = filedat.readline()
if b"POSITION XYZ" in linedat:
xyz = numpy.array([float(x) for x in linedat.split()[:3]])
elif b"Monument location:" in linedat:
lat, lon, height = [float(x) for x in linedat.split()[2:5]]
elif b"(latitude)" in linedat:
lat = float(linedat.split()[0])
elif b"(longitude)" in linedat:
lon = float(linedat.split()[0])
elif b"(elevation)" in linedat:
height = float(linedat.split()[0])
if lat is not None and lon is not None and height is not None:
xyz = coordinates.geodetic2ecef((lat, lon, height))
if xyz is not None:
return cast(types.ECEF_XYZ, xyz)
return None | e7fc390a36f34aed04d30becd544d58ea3f6aa41 | 3,650,908 |
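The `POSITION XYZ` branch above matches the standard RINEX `APPROX POSITION XYZ` header record. A standalone sketch of that parse (the coordinate values are invented for illustration):

```python
import numpy as np

# Example RINEX 2.x header line; the numbers are made up for illustration.
linedat = b"  4027455.1710   306998.2700  4919324.9290                  APPROX POSITION XYZ"
xyz = None
if b"POSITION XYZ" in linedat:
    xyz = np.array([float(x) for x in linedat.split()[:3]])
print(xyz)  # the three approximate ECEF coordinates, in metres
```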
def get_profiles():
"""Return the paths to all profiles in the local library"""
paths = APP_DIR.glob("profile_*")
return sorted(paths) | 729b78daa1d259a227147698a2d4d4c9c5126f29 | 3,650,909 |
def split(array, nelx, nely, nelz, dof):
"""
Splits an array of boundary conditions into an array of collections of
elements. Boundary conditions that are more than one node in size are
grouped together. From the nodes, the function returns the neighboring
elements inside the array.
"""
if len(array) == 0:
return []
array.sort()
connected_nodes = [array[0]]
nlist = []
tmp = _get_elem(array[0], nelx, nely, nelz, dof)
for i in range(1, len(array)):
if _nodes_connected(connected_nodes, array[i], nelx, nely, nelz, dof):
tmp = tmp.union(_get_elem(array[i], nelx, nely, nelz, dof))
connected_nodes.append(array[i])
else:
nlist.append(list(tmp))
tmp = _get_elem(array[i], nelx, nely, nelz, dof)
nlist.append(list(tmp))
return nlist | 1dbc48402e7124e3384bc56538b05f073fe64370 | 3,650,910 |
def sanitize_app_name(app):
"""Sanitize the app name and build matching path"""
app = "".join(c for c in app if c.isalnum() or c in ('.', '_')).rstrip().lstrip('/')
return app | fca922d8b622baa1d5935cd8eca2ffca050a4c86 | 3,650,911 |
import pathlib
def get_rinex_file_version(file_path: pathlib.PosixPath) -> str:
""" Get RINEX file version for a given file path
Args:
file_path: File path.
Returns:
RINEX file version
"""
with files.open(file_path, mode="rt") as infile:
try:
version = infile.readline().split()[0]
except IndexError:
log.fatal(f"Could not find Rinex version in file {file_path}")
return version | c7060e8eb32a0e5539323c7334221d4b1967bb1f | 3,650,912 |
import socket
def get_hm_port(identity_service, local_unit_name, local_unit_address,
host_id=None):
"""Get or create a per unit Neutron port for Octavia Health Manager.
A side effect of calling this function is that a port is created if one
does not already exist.
:param identity_service: reactive Endpoint of type ``identity-service``
:type identity_service: RelationBase class
:param local_unit_name: Name of juju unit, used to build tag name for port
:type local_unit_name: str
:param local_unit_address: DNS resolvable IP address of unit, used to
build Neutron port ``binding:host_id``
:type local_unit_address: str
:param host_id: Identifier used by SDN for binding the port
:type host_id: Option[None,str]
:returns: Port details extracted from result of call to
neutron_client.list_ports or neutron_client.create_port
:rtype: dict
:raises: api_crud.APIUnavailable, api_crud.DuplicateResource
"""
session = session_from_identity_service(identity_service)
try:
nc = init_neutron_client(session)
resp = nc.list_networks(tags='charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'networks', e)
network = None
n_resp = len(resp.get('networks', []))
if n_resp == 1:
network = resp['networks'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'networks', data=resp)
else:
ch_core.hookenv.log('No network tagged with `charm-octavia` exists, '
'deferring port setup awaiting network and port '
'(re-)creation.', level=ch_core.hookenv.WARNING)
return
health_secgrp = None
try:
resp = nc.list_security_groups(tags='charm-octavia-health')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'security_groups', e)
n_resp = len(resp.get('security_groups', []))
if n_resp == 1:
health_secgrp = resp['security_groups'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'security_groups', data=resp)
else:
ch_core.hookenv.log('No security group tagged with '
'`charm-octavia-health` exists, deferring '
'port setup awaiting network and port '
'(re-)creation...',
level=ch_core.hookenv.WARNING)
return
try:
resp = nc.list_ports(tags='charm-octavia-{}'
.format(local_unit_name))
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
port_template = {
'port': {
# avoid race with OVS agent attempting to bind port
# before it is created in the local units OVSDB
'admin_state_up': False,
'binding:host_id': host_id or socket.gethostname(),
# NOTE(fnordahl): device_owner has special meaning
# for Neutron [0], and things may break if set to
            # an arbitrary value. Using a value known by Neutron
            # is_dvr_serviced() function [1] gets us the correct
            # rules applied to the port to allow IPv6 Router
# Advertisement packets through LP: #1813931
# 0: https://github.com/openstack/neutron/blob/
# 916347b996684c82b29570cd2962df3ea57d4b16/
# neutron/plugins/ml2/drivers/openvswitch/
# agent/ovs_dvr_neutron_agent.py#L592
# 1: https://github.com/openstack/neutron/blob/
# 50308c03c960bd6e566f328a790b8e05f5e92ead/
# neutron/common/utils.py#L200
'device_owner': (
neutron_lib.constants.DEVICE_OWNER_LOADBALANCERV2),
'security_groups': [
health_secgrp['id'],
],
'name': 'octavia-health-manager-{}-listen-port'
.format(local_unit_name),
'network_id': network['id'],
},
}
n_resp = len(resp.get('ports', []))
if n_resp == 1:
hm_port = resp['ports'][0]
        # Ensure binding:host_id is up to date on an existing port
#
# In the event of a need to update it, we bring the port down to make
# sure Neutron rebuilds the port correctly.
#
# Our caller, ``setup_hm_port``, will toggle the port admin status.
if hm_port and hm_port.get(
'binding:host_id') != port_template['port']['binding:host_id']:
try:
nc.update_port(hm_port['id'], {
'port': {
'admin_state_up': False,
'binding:host_id': port_template['port'][
'binding:host_id'],
}
})
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
elif n_resp > 1:
raise DuplicateResource('neutron', 'ports', data=resp)
else:
# create new port
try:
resp = nc.create_port(port_template)
hm_port = resp['port']
ch_core.hookenv.log('Created port {}'.format(hm_port['id']),
ch_core.hookenv.INFO)
# unit specific tag is used by each unit to load their state
nc.add_tag('ports', hm_port['id'],
'charm-octavia-{}'
.format(local_unit_name))
# charm-wide tag is used by leader to load cluster state and build
# ``controller_ip_port_list`` configuration property
nc.add_tag('ports', hm_port['id'], 'charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
return hm_port | 6cde426643219f4fc3385a36e3c20503b8c41a9e | 3,650,913 |
def total_length(neurite):
"""Neurite length. For a morphology it will be a sum of all neurite lengths."""
return sum(s.length for s in neurite.iter_sections()) | 854429e073eaea49c168fb0f9e381c71d7a7038a | 3,650,914 |
from PIL import ImageOps
def _solarize(img, magnitude):
    """Solarize the image: invert all pixel values above the given threshold."""
return ImageOps.solarize(img, magnitude) | d588068f42930872775e62a619333439d8aa47d8 | 3,650,915 |
import numpy as np
def calculateCurvature(yRange, left_fit_cr):
"""
Returns the curvature of the polynomial `fit` on the y range `yRange`.
"""
return ((1 + (2 * left_fit_cr[0] * yRange * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0]) | af1cd81c3eeb85297bcfcb44779bf86b4c6b8dc9 | 3,650,916 |
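The expression is the textbook radius-of-curvature formula R = (1 + (2Ay + B)²)^(3/2) / |2A| for a second-order fit x = Ay² + By + C. A standalone numeric check with made-up coefficients and pixel-to-metre scale:

```python
import numpy as np

ym_per_pix = 30 / 720                  # assumed metres-per-pixel scale (illustrative value)
left_fit_cr = [2e-4, -0.05, 300.0]     # A, B, C of a hypothetical second-order fit
yRange = 719                           # evaluate at the bottom row of a 720-px image

radius = ((1 + (2 * left_fit_cr[0] * yRange * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) \
    / np.absolute(2 * left_fit_cr[0])
print(f"radius of curvature: {radius:.1f}")
```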
import os
def find_file_in_sequence(file_root: str, file_number: int = 1) -> tuple:
"""
Returns the Nth file in an image sequence where N is file_number (-1 for first file).
Args:
file_root: image file root name.
file_number: image file number in sequence.
Returns:
tuple (filename,sequencenumber).
"""
currentfolder = azcam.utils.curdir()
for _, _, files in os.walk(currentfolder):
break
for f in files:
if f.startswith(file_root):
break
try:
if not f.startswith(file_root):
raise azcam.AzcamError("image sequence not found")
except Exception:
raise azcam.AzcamError("image sequence not found")
firstfile = azcam.utils.fix_path(os.path.join(currentfolder, f))
firstsequencenumber = firstfile[-9:-5]
firstnum = firstsequencenumber
firstsequencenumber = int(firstsequencenumber)
sequencenumber = firstsequencenumber + file_number - 1
newnum = "%04d" % sequencenumber
filename = firstfile.replace(firstnum, newnum)
return (filename, sequencenumber) | 276838d6c8673f68182a9daded60762cddbf54d2 | 3,650,917 |
def testing_server_error_view(request):
"""Displays a custom internal server error (500) page"""
return render(request, '500.html', {}) | 84055f37d1ba215ae0e439c1f9d96260208133ff | 3,650,918 |
def main_epilog() -> str:
"""
This method builds the footer for the main help screen.
"""
msg = "To get help on a specific command, see `conjur <command> -h | --help`\n\n"
msg += "To start using Conjur with your environment, you must first initialize " \
"the configuration. See `conjur init -h` for more information."
return msg | ecf4167535b5f1e787d286a3b2194816790a7e6a | 3,650,919 |
import numpy as np
def sigma_M(n):
"""boson lowering operator, AKA sigma minus"""
return np.diag([np.sqrt(i) for i in range(1, n)], k=1) | 532a082ed5fd3094044162c85042bf963dad4461 | 3,650,920 |
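A quick sanity check of the operator for a three-level truncation: the non-zero entries sit on the first superdiagonal, and acting on the number state |2⟩ gives √2·|1⟩. The sketch below re-defines the two-line helper so it runs on its own:

```python
import numpy as np

def sigma_M(n):
    """Boson lowering operator (copy of the snippet above)."""
    return np.diag([np.sqrt(i) for i in range(1, n)], k=1)

a = sigma_M(3)
print(a)                               # sqrt(1) and sqrt(2) on the first superdiagonal
print(a @ np.array([0.0, 0.0, 1.0]))   # sqrt(2) * |1>  ->  [0. 1.41421356 0.]
```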
def windowing_is(root, *window_sys):
"""
Check for the current operating system.
:param root: A tk widget to be used as reference
:param window_sys: if any windowing system provided here is the current
windowing system `True` is returned else `False`
:return: boolean
"""
windowing = root.tk.call('tk', 'windowingsystem')
return windowing in window_sys | fd021039686b1971f8c5740beb804826a7afdf80 | 3,650,921 |
def init_columns_entries(variables):
"""
    Making sure we have `columns` & `entries` to return, without affecting the original objects.
"""
columns = variables.get('columns')
if columns is None:
columns = [] # Relevant columns in proper order
if isinstance(columns, str):
columns = [columns]
else:
columns = list(columns)
entries = variables.get('entries')
if entries is None:
entries = [] # Entries of dict with relevant columns
elif isinstance(entries, dict):
entries = [entries]
else:
entries = list(entries)
return columns, entries | 49a12b0561d0581785c52d9474bc492f2c64626c | 3,650,922 |
from typing import Tuple
def _run_ic(dataset: str, name: str) -> Tuple[int, float, str]:
"""Run iterative compression on all datasets.
Parameters
----------
dataset : str
Dataset name.
name : str
FCL name.
Returns
-------
Tuple[int, float, str]
Solution size, time, and certificate.
"""
# Execute
time, size, certificate = solve_ic(
str(FCL_DATA_DIR / dataset / 'huffner' / (name + HUFFNER_DATA_EXT)),
timeout=EXACT_TIMEOUT,
preprocessing=2,
htime=min(0.3 * EXACT_TIMEOUT, 1)
)
# Return
return size, time, str(certificate) | e041cb9c0ca5af98d1f8d23a0e6f3cbe7f5a34a4 | 3,650,923 |
def notch(Wn, Q=10, analog=False, output="ba"):
"""
Design an analog or digital biquad notch filter with variable Q.
The notch differs from a peaking cut filter in that the gain at the
notch center frequency is 0, or -Inf dB.
Transfer function: H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
    Parameters
----------
Wn : float
Center frequency of the filter.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
Q : float
Quality factor of the filter. Examples:
* sqrt(2) is 1 octave wide
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'ss'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
state-space ('ss').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
"""
# H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
b = np.array([1, 0, 1])
a = np.array([1, 1 / Q, 1])
return _transform(b, a, Wn, analog, output) | a9b4e488bb5a849459bf843abe2bd9d6d18f662d | 3,650,924 |
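The snippet delegates the actual frequency transform to a `_transform` helper that is not shown here. As a runnable point of comparison, SciPy ships a second-order notch design with the same intent; a minimal sketch with illustrative values, assuming SciPy is installed:

```python
import numpy as np
from scipy import signal

fs = 1000.0                                      # sampling rate in Hz
b, a = signal.iirnotch(w0=50.0, Q=30.0, fs=fs)   # notch out 50 Hz hum
w, h = signal.freqz(b, a, worN=[25.0, 50.0, 100.0], fs=fs)
print(np.abs(h))   # gain ~1 away from the notch, ~0 at 50 Hz
```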
def Torus(radius=(1, 0.5), tile=(20, 20), device='cuda:0'):
"""
Creates a torus quad mesh
Parameters
----------
radius : (float,float) (optional)
radii of the torus (default is (1,0.5))
tile : (int,int) (optional)
the number of divisions of the cylinder (default is (20,20))
device : str or torch.device (optional)
the device the tensors will be stored to (default is 'cuda:0')
Returns
-------
(Tensor,LongTensor,Tensor)
the point set tensor, the topology tensor, the vertex normals
"""
T, P = grid2mesh(*tuple(TorusPatch(radius=radius, tile=tile, device=device)))
N = vertex_normal(P, quad2tri(T))
return P, T, N | 79c7934cabecdf3a4c9c28de7193ccae1ce037de | 3,650,925 |
def check_new_value(new_value: str, definition) -> bool:
"""
checks with definition if new value is a valid input
:param new_value: input to set as new value
:param definition: valid options for new value
:return: true if valid, false if not
"""
if type(definition) is list:
if new_value in definition:
return True
else:
return False
elif definition is bool:
if new_value == "true" or new_value == "false":
return True
else:
return False
elif definition is int:
try:
int(new_value)
return True
except ValueError:
return False
elif definition is float:
try:
float(new_value)
return True
except ValueError:
return False
elif definition is str:
return True
else:
# We could not validate the type or values so we assume it is incorrect
return False | d7204c7501e713c4ce8ecaeb30239763c13c1f18 | 3,650,926 |
def covid_API(cases_and_deaths: dict) -> dict:
"""
Imports Covid Data
:param cases_and_deaths: This obtains dictionary from config file
:return: A dictionary of covid information
"""
api = Cov19API(filters=england_only, structure=cases_and_deaths)
data = api.get_json()
return data | 8429c35770d25d595a6f51a2fe80d2eac585c785 | 3,650,927 |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
# (swap for plain `keras.*` imports if the standalone Keras package is in use)
def cnn(train_x, train_y, test1_x, test1_y, test2_x, test2_y):
    """
    Train and evaluate a convolutional network (two conv/pool blocks followed by dense layers).
    """
# Add a single "channels" dimension at the end
trn_x = train_x.reshape([-1, 30, 30, 1])
tst1_x = test1_x.reshape([-1, 30, 30, 1])
tst2_x = test2_x.reshape([-1, 30, 30, 1])
# First layer will need argument `input_shape=(30,30,1)`
model = Sequential([
# TODO: add your implementation here
Conv2D(32, kernel_size = (5, 5), strides=(1, 1), activation='relu', input_shape=(30,30,1)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
Conv2D(64, kernel_size = (5, 5), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(trn_x, train_y, epochs=5)
print("Evaluating CNN on test set 1")
model.evaluate(tst1_x, test1_y)
print("Evaluating CNN on test set 2")
return model.evaluate(tst2_x, test2_y) | e2607c35d2236188a2ec7fc797c43f6970665d12 | 3,650,928 |
import bpy
def gather_gltf2(export_settings):
"""
Gather glTF properties from the current state of blender.
:return: list of scene graphs to be added to the glTF export
"""
scenes = []
animations = [] # unfortunately animations in gltf2 are just as 'root' as scenes.
active_scene = None
for blender_scene in bpy.data.scenes:
scenes.append(__gather_scene(blender_scene, export_settings))
if export_settings[gltf2_blender_export_keys.ANIMATIONS]:
animations += __gather_animations(blender_scene, export_settings)
if bpy.context.scene.name == blender_scene.name:
            active_scene = len(scenes) - 1
return active_scene, scenes, animations | 6a382349a1a2aef3d5d830265b0f7430440ac6ef | 3,650,929 |
import time
import gym
import numpy as np
def getOneRunMountainCarFitness_modifiedReward(tup):
"""Get one fitness from the MountainCar or MountainCarContinuous
environment while modifying its reward function.
The MountainCar environments reward only success, not progress towards
success. This means that individuals that are trying to drive up the
hill, but not succeeding will get the exact same fitness as individuals
that do nothing at all. This function provides some reward to the
individual based on the maximum distance it made it up the hill.
Parameters: A tuple expected to contain the following:
0: individual - The model,
1: continuous - True if using MountainCarContinuous, false to use
MountainCar.
2: renderSpeed - None to not render, otherwise the number of seconds to
sleep between each frame; this can be a floating point
value."""
individual, continuous, renderSpeed = tup[0], tup[1], tup[2]
env = None
if continuous:
env = gym.make('MountainCarContinuous-v0')
else:
env = gym.make('MountainCar-v0')
maxFrames = 2000
runReward = 0
    maxPosition = -1.2  # -1.2 is the minimum position for this environment.
observation = env.reset()
individual.resetForNewTimeSeries()
for j in range(maxFrames):
# The continuous version doesn't required argmax, but it does need
# a conversion from a single value to the list that the environment
# expects:
if continuous:
action = [individual.calculateOutputs(observation)]
else:
action = np.argmax(individual.calculateOutputs(observation))
if renderSpeed is not None:
env.render()
if renderSpeed != 0:
time.sleep(renderSpeed)
observation, reward, done, info = env.step(action)
runReward += reward
# Record the furthest we made it up the hill:
maxPosition = max(observation[0], maxPosition)
if done:
break
env.close()
# Return the fitness, modified by the maxPosition attained. The position
# weighs heavier with the continuous version:
if continuous:
return runReward + (1000.0 * maxPosition)
else:
return runReward + (10.0 * maxPosition) | f17e768755d0b4862ee70a0fe7d317a8074d7852 | 3,650,930 |
def ArtToModel(art, options):
"""Convert an Art object into a Model object.
Args:
art: geom.Art - the Art object to convert.
options: ImportOptions - specifies some choices about import
Returns:
(geom.Model, string): if there was a major problem, Model may be None.
The string will be errors and warnings.
"""
pareas = art2polyarea.ArtToPolyAreas(art, options.convert_options)
if not pareas:
return (None, "No visible faces found")
if options.scaled_side_target > 0:
pareas.scale_and_center(options.scaled_side_target)
m = model.PolyAreasToModel(pareas, options.bevel_amount,
options.bevel_pitch, options.quadrangulate)
if options.extrude_depth > 0:
model.ExtrudePolyAreasInModel(m, pareas, options.extrude_depth,
options.cap_back)
return (m, "") | 3130471f7aa6b0b8fd097c97ca4916a51648112e | 3,650,931 |
import numpy as np
import pandas as pd
def simulate_data(N, intercept, slope, nu, sigma2=1, seed=None):
"""Simulate noisy linear model with t-distributed residuals.
Generates `N` samples from a one-dimensional linear regression with
residuals drawn from a t-distribution with `nu` degrees of freedom, and
scaling-parameter `sigma2`. The true parameters of the linear model are
specified by the `intercept` and `slope` parameters.
Args:
N, int: Number of samples.
intercept, float: The intercept of the linear model.
slope, float: The slope of the linear model.
nu, float (>0): The degrees of freedom of the t-distribution.
sigma2, float (>0): The scale-parameter of the t-distribution.
seed, int: Set random seed for repeatability.
Return:
DataFrame containing N samples from noisy linear model.
"""
np.random.seed(seed)
# x ~ Uniform(0,1)
interval = np.linspace(0,1, num=2*N)
sample = np.random.choice(interval, size=N, replace=False)
df = pd.DataFrame({"x": sample})
# generate y values using linear model
linear_map = lambda x: intercept + slope*x
df['y'] = linear_map(df['x']) + sigma2*np.random.standard_t(nu, N)
return df | a88e7f1958876c3dd47101da7f2f1789e02e4d18 | 3,650,932 |
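A standalone check of the same generative model: draw Student-t noise around a line and confirm an ordinary least-squares fit roughly recovers the parameters (all values illustrative):

```python
import numpy as np

rng = np.random.default_rng(42)
N, intercept, slope, nu, sigma2 = 500, 1.5, 2.0, 3, 0.1
x = rng.uniform(0, 1, N)
y = intercept + slope * x + sigma2 * rng.standard_t(nu, N)
fit_slope, fit_intercept = np.polyfit(x, y, 1)
print(f"recovered slope ~ {fit_slope:.2f}, intercept ~ {fit_intercept:.2f}")
```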
import os
def _link_irods_folder_to_django(resource, istorage, foldername, exclude=()):
"""
Recursively Link irods folder and all files and sub-folders inside the folder to Django
Database after iRODS file and folder operations to get Django and iRODS in sync
:param resource: the BaseResource object representing a HydroShare resource
:param istorage: REDUNDANT: IrodsStorage object
:param foldername: the folder name, as a fully qualified path
:param exclude: UNUSED: a tuple that includes file names to be excluded from
linking under the folder;
:return: List of ResourceFile of newly linked files
"""
if __debug__:
assert(isinstance(resource, BaseResource))
if istorage is None:
istorage = resource.get_irods_storage()
res_files = []
if foldername:
store = istorage.listdir(foldername)
# add files into Django resource model
for file in store[1]:
if file not in exclude:
file_path = os.path.join(foldername, file)
# This assumes that file_path is a full path
res_files.append(link_irods_file_to_django(resource, file_path))
# recursively add sub-folders into Django resource model
for folder in store[0]:
res_files = res_files + \
_link_irods_folder_to_django(resource, istorage,
os.path.join(foldername, folder), exclude)
return res_files | c0ff6fdbfae40f6c3e0cc462b924ff835dd6b20a | 3,650,933 |
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
"""
52ms 93.76%
13.1MB 83.1%
:param self:
:param head:
:param n:
:return:
"""
if not head:
return head
dummy = ListNode(0)
dummy.next = head
fast = dummy
while n:
fast = fast.next
n -= 1
slow = dummy
while fast and fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return dummy.next | 5b9fa939aec64425e7ca9932fe0cc5814fd0f608 | 3,650,934 |
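The solution is the classic dummy-node / two-pointer trick: give `fast` an n-node head start, then advance both pointers until `fast` reaches the tail, so `slow` stops just before the node to remove. A self-contained walk-through of the same idea:

```python
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val, self.next = val, nxt

# Build 1 -> 2 -> 3 -> 4 -> 5
head = None
for v in reversed([1, 2, 3, 4, 5]):
    head = ListNode(v, head)

n = 2
dummy = ListNode(0, head)
fast = slow = dummy
for _ in range(n):             # give `fast` an n-node head start
    fast = fast.next
while fast.next:               # when `fast` reaches the tail...
    fast, slow = fast.next, slow.next
slow.next = slow.next.next     # ...`slow` sits just before the node to drop

out, node = [], dummy.next
while node:
    out.append(node.val)
    node = node.next
print(out)  # [1, 2, 3, 5]
```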
from ..model.types import UnresolvedTypeReference
from typing import Any
from typing import Tuple
import warnings
def validate_template(
template: Any, allow_deprecated_identifiers: bool = False
) -> Tuple[PackageRef, str]:
"""
Return a module and type name component from something that can be interpreted as a template.
:param template:
Any object that can be interpreted as an identifier for a template.
:param allow_deprecated_identifiers:
Allow deprecated identifiers (:class:`UnresolvedTypeReference` and a period delimiter
instead of a colon between module names and entity names).
:return:
A tuple of package ID and ``Module.Name:EntityName`` (the package-scoped identifier for the
type). The special value ``'*'`` is used if either the package ID, module name, or both
should be wildcarded.
:raise ValueError:
If the object could not be interpreted as a thing referring to a template.
"""
if template == "*" or template is None:
return STAR, "*"
if allow_deprecated_identifiers:
warnings.warn(
"validate_template(..., allow_deprecated_identifiers=True) will be removed in dazl v8",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# noinspection PyDeprecation
if isinstance(template, UnresolvedTypeReference):
template = template.name
if isinstance(template, str):
components = template.split(":")
if len(components) == 3:
# correct number of colons for a fully-qualified name
pkgid, m, e = components
return PackageRef(pkgid), f"{m}:{e}"
elif len(components) == 2:
# one colon, so assume the package ID is unspecified UNLESS the second component is a
# wildcard; then we assume the wildcard means any module name and entity name
m, e = components
if m == STAR and e != STAR and not allow_deprecated_identifiers:
# strings that look like "*:SOMETHING" are explicitly not allowed unless deprecated
# identifier support is requested; this is almost certainly an attempt to use
# periods instead of colons as a delimiter between module name and entity name
raise ValueError("string must be in the format PKG_REF:MOD:ENTITY or MOD:ENTITY")
return (STAR, f"{m}:{e}") if e != "*" else (PackageRef(m), "*")
elif len(components) == 1 and allow_deprecated_identifiers:
# no colon whatsoever
# TODO: Add a deprecation warning in the appropriate place
m, _, e = components[0].rpartition(".")
return STAR, f"{m}:{e}"
else:
raise ValueError("string must be in the format PKG_REF:MOD:ENTITY or MOD:ENTITY")
if isinstance(template, TypeConName):
return package_ref(template), package_local_name(template)
else:
raise ValueError(f"Don't know how to convert {template!r} into a template") | 32630bf3ebfd2626791bbb68dacc402d11b99c9a | 3,650,935 |
import logging
def call_status():
"""
Route received for webhook about call
"""
if 'mocean-call-uuid' in request.form:
call_uuid = request.form.get('mocean-call-uuid')
logging.info(f'### Call status received [{call_uuid}] ###')
for k, v in request.form.items():
logging.info(f'\t{k}:{v}')
if request.form.get('mocean-call-uuid') in calls \
and request.form.get('mocean-status') == 'HANGUP':
logging.debug(f'Deleting call-uuid[{call_uuid}] from calls dict')
del calls[call_uuid]
call_ended.append(call_uuid)
return Response('', status=204, mimetype='text/plain')
else:
return invalid_response() | 700af190ae3b7b74a361728b8907683db0e9338f | 3,650,936 |
import os
def read_data(filename,**kwargs):
"""Used to read a light curve or curve object in pickle format.
Either way, it'll come out as a curve object.
Parameters
----------
filename : str
Name of the file to be read (ascii or pickle)
Returns
-------
curve : :class:`~sntd.curve` or :class:`~sntd.curveDict`
"""
return(_switch(os.path.splitext(filename)[1])(filename,**kwargs)) | 3bf7af7342d2e1d6ea110e30ffbd55fd383885f4 | 3,650,937 |
import logging
def get_masters(domain):
""" """
content = request.get_json()
conf = {
'check_masters' : request.headers.get('check_masters'),
'remote_api' : request.headers.get('remote_api'),
'remote_api_key' : request.headers.get('remote_api_key')
}
masters = pdns_get_masters(
remote_api=conf['remote_api'],
remote_api_key=conf['remote_api_key'],
domain=domain
)
logging.info("masters: {}".format(masters))
return jsonify(masters) | dd006d889ee9f11a8f522a111ce7a4db4f5ba039 | 3,650,938 |
def SplitGeneratedFileName(fname):
"""Reverse of GetGeneratedFileName()
"""
return tuple(fname.split('x',4)) | 0210361d437b134c3c24a224ab93d2ffdcfc32ec | 3,650,939 |
def chooseBestFeatureToSplit(dataSet):
"""
选择最优划分特征
输入: 数据集
输出: 最优特征
"""
numFeatures = len(dataSet[0])-1
baseEntropy = calcShannonEnt(dataSet) #原始数据的熵
bestInfoGain = 0
bestFfeature = -1
for i in range(numFeatures): #循环所有特征
featList = [example[i] for example in dataSet]
uniqueVals = set(featList) #某个特征的取值,如[long,short]
newEntropy = 0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet,i,value) #按某一特征的取值分类,如Long
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob*calcShannonEnt(subDataSet) #计算按该特征分类的熵,如DATASET(LONG)和DATASET(Short)的熵
infoGain = baseEntropy - newEntropy #计算增益,原始熵-Dataset(long)的熵-Dataset(short)的熵
if (infoGain>bestInfoGain):
bestInfoGain = infoGain
bestFfeature = i #选出最优分类特征
return bestFfeature | 1e9935cf280b5bf1a32f34187038301109df7d19 | 3,650,940 |
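The selection above is plain ID3 information gain: the entropy of the labels minus the size-weighted entropy of each split. A small standalone illustration of that computation on a toy data set:

```python
from collections import Counter
from math import log2

def entropy(labels):
    n = len(labels)
    return -sum((c / n) * log2(c / n) for c in Counter(labels).values())

# Toy set: feature 0 = hair length, last column = class label
data = [['long', 'yes'], ['long', 'yes'], ['short', 'no'], ['short', 'yes']]
labels = [row[-1] for row in data]
base = entropy(labels)

gain = base
for value in {row[0] for row in data}:
    subset = [row[-1] for row in data if row[0] == value]
    gain -= len(subset) / len(data) * entropy(subset)
print(f"base entropy={base:.3f}, gain on feature 0={gain:.3f}")  # 0.811, 0.311
```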
import torch
from tqdm import tqdm
def evaluate_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, device: torch.device):
"""Function for evaluation of a model `model` on the data in `dataloader` on device `device`"""
# Define a loss (mse loss)
mse = torch.nn.MSELoss()
# We will accumulate the mean loss in variable `loss`
loss = torch.tensor(0., device=device)
with torch.no_grad(): # We do not need gradients for evaluation
# Loop over all samples in `dataloader`
for data in tqdm(dataloader, desc="scoring", position=0):
# Get a sample and move inputs and targets to device
inputs, targets, mask = data
inputs = inputs.to(device)
targets = targets.to(device)
mask = mask.to(device)
# mask = mask.to(dtype=torch.bool)
# Get outputs for network
outputs = model(inputs) * mask
# predictions = [outputs[i, mask[i]] for i in range(len(outputs))]
# Here we could clamp the outputs to the minimum and maximum values of inputs for better performance
# Calculate mean mse loss over all samples in dataloader (accumulate mean losses in `loss`)
# losses = torch.stack([mse(prediction, target.reshape((-1,))) for prediction, target in zip(predictions, targets)])
# loss = losses.mean()
loss = mse(outputs, targets)
return loss | e550c469d0b66cc0a0ef32d2907521c77ed760fa | 3,650,941 |
def get_DOE_quantity_byfac(DOE_xls, fac_xls, facilities='selected'):
"""
Returns total gallons of combined imports and exports
by vessel type and oil classification to/from WA marine terminals
used in our study.
DOE_xls[Path obj. or string]: Path(to Dept. of Ecology transfer dataset)
facilities [string]: 'all' or 'selected'
"""
# convert inputs to lower-case
#transfer_type = transfer_type.lower()
facilities = facilities.lower()
# Import Department of Ecology data:
print('get_DOE_quantity_byfac: not yet tested with fac_xls as input')
df = get_DOE_df(DOE_xls, fac_xls)
# get list of oils grouped by our monte_carlo oil types
oil_types = [
'akns', 'bunker', 'dilbit',
'jet', 'diesel', 'gas', 'other'
]
# names of oil groupings that we want for our output/graphics
oil_types_graphics = [
'ANS', 'Bunker-C', 'Dilbit',
'Jet Fuel', 'Diesel', 'Gasoline',
'Other'
]
oil_classification = get_DOE_oilclassification(DOE_xls)
# SELECTED FACILITIES
exports={}
imports={}
combined={}
if facilities == 'selected':
# The following list includes facilities used in Casey's origin/destination
# analysis with names matching the Dept. of Ecology (DOE) database.
# For example, the shapefile "Maxum Petroleum - Harbor Island Terminal" is
# labeled as 'Maxum (Rainer Petroleum)' in the DOE database. I use the
# Ecology language here and will need to translate to Shapefile speak
# If facilities are used in output to compare with monte-carlo transfers
# then some terminals will need to be grouped, as they are in the monte carlo.
# Terminal groupings in the voyage joins are: (1)
# 'Maxum (Rainer Petroleum)' and 'Shell Oil LP Seattle Distribution Terminal'
# are represented in
# ==>'Kinder Morgan Liquids Terminal - Harbor Island', and
# (2) 'Nustar Energy Tacoma' => 'Phillips 66 Tacoma Terminal'
facility_names = [
'Alon Asphalt Company (Paramount Petroleum)',
'Andeavor Anacortes Refinery (formerly Tesoro)',
'BP Cherry Point Refinery',
'Kinder Morgan Liquids Terminal - Harbor Island' ,
'Maxum (Rainer Petroleum)',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester',
'Nustar Energy Tacoma',
'Phillips 66 Ferndale Refinery',
'Phillips 66 Tacoma Terminal',
'SeaPort Sound Terminal',
'Shell Oil LP Seattle Distribution Terminal',
'Shell Puget Sound Refinery',
'Tesoro Port Angeles Terminal','U.S. Oil & Refining',
'Tesoro Pasco Terminal', 'REG Grays Harbor, LLC',
'Tesoro Vancouver Terminal',
'Tidewater Snake River Terminal',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)'
]
for vessel_type in ['atb','barge','tanker']:
exports[vessel_type]={}
imports[vessel_type]={}
combined[vessel_type]={}
if vessel_type == 'barge':
print('Tallying barge quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(~df.Receiver.str.contains('ITB')) &
(~df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(~df.Deliverer.str.contains('ITB')) &
(~df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'tanker':
print('Tallying tanker quantities')
# get transfer quantities by oil type
type_description = ['TANK SHIP']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'atb':
print('Tallying atb quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Receiver.str.contains('ITB') |
df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Deliverer.str.contains('ITB') |
df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# combine imports and exports and convert oil type names to
# those we wish to use for graphics/presentations
# The name change mostly matters for AKNS -> ANS.
for idx,oil in enumerate(oil_types):
# convert names
exports[vessel_type][oil_types_graphics[idx]] = (
exports[vessel_type][oil]
)
imports[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil]
)
# remove monte-carlo names
exports[vessel_type].pop(oil)
imports[vessel_type].pop(oil)
# combine imports and exports
combined[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil_types_graphics[idx]] + \
exports[vessel_type][oil_types_graphics[idx]]
)
return exports, imports, combined | 371fd9b2bc0f9e45964af5295de1edad903729c9 | 3,650,942 |
import re
def number_finder(page, horse):
"""Extract horse number with regex."""
if 'WinPlaceShow' in page:
return re.search('(?<=WinPlaceShow\\n).[^{}]*'.format(horse), page).group(0)
elif 'WinPlace' in page:
return re.search('(?<=WinPlace\\n).[^{}]*'.format(horse), page).group(0) | 483067fcfa319a7dfe31fdf451db82550fd35d03 | 3,650,943 |
from ...data import COCODetection
def ssd_300_mobilenet0_25_coco(pretrained=False, pretrained_base=True, **kwargs):
"""SSD architecture with mobilenet0.25 base networks for COCO.
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
HybridBlock
A SSD detection network.
"""
classes = COCODetection.CLASSES
return get_ssd('mobilenet0.25', 300,
features=['relu22_fwd', 'relu26_fwd'],
filters=[256, 256, 128, 128],
sizes=[21, 45, 99, 153, 207, 261, 315],
ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
steps=[8, 16, 32, 64, 100, 300],
classes=classes, dataset='coco', pretrained=pretrained,
pretrained_base=pretrained_base, **kwargs) | 5c234e824d60a116b7640eff4c50adba98792927 | 3,650,944 |
def get_project_by_id(client: SymphonyClient, id: str) -> Project:
"""Get project by ID
:param id: Project ID
:type id: str
:raises:
* FailedOperationException: Internal symphony error
* :class:`~psym.exceptions.EntityNotFoundError`: Project does not exist
:return: Project
:rtype: :class:`~psym.common.data_class.Project`
**Example**
.. code-block:: python
project = client.get_project_by_id(
id="12345678",
)
"""
result = ProjectDetailsQuery.execute(client, id=id)
if result is None:
raise EntityNotFoundError(entity=Entity.Project, entity_id=id)
return format_to_project(project_fragment=result) | 72904b1f72eb2ce3e031df78d8f00cef8d5b5791 | 3,650,945 |
import numpy as np
def write_trans_output(k, output_fname, output_steps_fname, x, u, time, nvar):
"""
Output transient step and spectral step in a CSV file"""
# Transient
if nvar > 1:
uvars = np.split(u, nvar)
results_u = [np.linalg.norm(uvar, np.inf) for uvar in uvars]
results = [
time,
]
results[1:1] = results_u
else:
results = [time, np.linalg.norm(u, np.inf)]
fmt = ["%1.4e"]
fmt_var = ["%1.4e"] * nvar
fmt[1:1] = fmt_var
with open(output_fname, "a+", newline="") as write_obj:
np.savetxt(
write_obj,
[results],
fmt=fmt,
comments="",
delimiter=",",
)
# Spectral
if bool(output_steps_fname): # string not empty
filename = output_steps_fname + str(k) + ".csv"
if nvar > 1:
uvars = np.split(u, nvar)
uvars = [np.concatenate([[0.0], uvar, [0.0]]) for uvar in uvars]
uvars = np.array(uvars)
header = ["x"]
header_var = ["u" + str(int(k)) for k in range(nvar)]
header[1:1] = header_var
header = ",".join(header)
data = np.column_stack((np.flip(x), uvars.transpose()))
else:
u = np.concatenate([[0.0], u, [0.0]])
header = "x,u"
data = np.column_stack((np.flip(x), u))
np.savetxt(
filename, data, delimiter=",", fmt="%1.4e", header=header, comments=""
)
return None | 5681902519af79777f8fb5aa2a36f8445ee4cf32 | 3,650,946 |
def browser(browserWsgiAppS):
"""Fixture for testing with zope.testbrowser."""
assert icemac.addressbook.testing.CURRENT_CONNECTION is not None, \
"The `browser` fixture needs a database fixture like `address_book`."
return icemac.ab.calendar.testing.Browser(wsgi_app=browserWsgiAppS) | 47c9a0d4919be55d15a485632bca826183ba92b2 | 3,650,947 |
import numpy as np
from sklearn.mixture import BayesianGaussianMixture as BGM
from sklearn.neighbors import KernelDensity as KD
def mixture_fit(samples,
model_components,
model_covariance,
tolerance,
em_iterations,
parameter_init,
model_verbosity,
model_selection,
kde_bandwidth):
"""Fit a variational Bayesian non-parametric Gaussian mixture model to samples.
This function takes the parameters described below to initialize and then fit a
model to a provided set of data points. It returns a Scikit-learn estimator object
that can then be used to generate samples from the distribution approximated by the
model and score the log-probabilities of data points based on the returned model.
Parameters:
-----------
samples : array-like
The set of provided data points that the function's model should be fitted to.
model_components : int, defaults to rounding up (2 / 3) * the number of dimensions
The maximum number of Gaussians to be fitted to data points in each iteration.
model_covariance : {'full', 'tied', 'diag', 'spherical'}
The type of covariance parameters the model should use for the fitting process.
tolerance : float
The model's convergence threshold at which the model's fit is deemed finalized.
em_iterations : int
The maximum number of expectation maximization iterations the model should run.
parameter_init : {'kmeans', 'random'}
The method used to initialize the model's weights, the means and the covariances.
model_verbosity : {0, 1, 2}
The amount of information that the model fitting should provide during runtime.
model_selection : {'gmm', 'kde'}
The selection of the type of model that should be used for the fitting process,
i.e. either a variational Bayesian non-parametric GMM or kernel density estimation.
kde_bandwidth : float
The kernel bandwidth that should be used in the case of kernel density estimation.
Returns:
--------
model : sklearn estimator
A variational Bayesian non-parametric Gaussian mixture model fitted to samples.
Attributes:
-----------
fit(X) : Estimate a model's parameters with the expectation maximization algorithm.
sample(n_samples=1) : Generate a new set of random data points from fitted Gaussians.
score_samples(X) : Calculate the weighted log-probabilities for each data point.
"""
# Check which type of model should be used for the iterative fitting process
if model_selection == 'gmm':
# Initialize a variational Bayesian non-parametric GMM for fitting
model = BGM(n_components = model_components,
covariance_type = model_covariance,
tol = tolerance,
max_iter = em_iterations,
init_params = parameter_init,
verbose = model_verbosity,
verbose_interval = 10,
warm_start = False,
random_state = 42,
weight_concentration_prior_type = 'dirichlet_process')
if model_selection == 'kde':
model = KD(bandwidth = kde_bandwidth,
kernel = 'gaussian',
metric = 'euclidean',
algorithm = 'auto',
breadth_first = True,
atol = 0.0,
rtol = tolerance)
# Fit the previously initialized model to the provided data points
model.fit(np.asarray(samples))
return model | 807f0ef2028a5dcb99052e6b86558f8b325405db | 3,650,948 |
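A minimal usage sketch of the GMM branch using scikit-learn directly (toy data, illustrative parameters); with the Dirichlet-process prior, the weights of unneeded components collapse toward zero:

```python
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.default_rng(0)
samples = np.vstack([rng.normal(-3, 0.5, (200, 2)), rng.normal(3, 0.5, (200, 2))])

model = BayesianGaussianMixture(
    n_components=5,                        # upper bound; extra components get ~0 weight
    weight_concentration_prior_type='dirichlet_process',
    max_iter=200,
    random_state=42,
).fit(samples)

print(np.round(model.weights_, 2))         # weight concentrates on ~2 components
print(model.score_samples(samples[:3]))    # per-sample log-likelihoods
print(model.sample(3)[0])                  # draw new points from the fitted mixture
```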
import yaml
import logging
def apply_k8s_specs(specs, mode=K8S_CREATE): # pylint: disable=too-many-branches,too-many-statements
"""Run apply on the provided Kubernetes specs.
Args:
specs: A list of strings or dicts providing the YAML specs to
apply.
mode: (Optional): Mode indicates how the resources should be created.
K8S_CREATE - Use the create verb. Works with generateName
K8S_REPLACE - Issue a delete of existing resources before doing a create
K8s_CREATE_OR_REPLACE - Try to create an object; if it already exists
replace it
"""
# TODO(jlewi): How should we handle patching existing updates?
results = []
if mode not in [K8S_CREATE, K8S_CREATE_OR_REPLACE, K8S_REPLACE]:
raise ValueError(f"Unknown mode {mode}")
for s in specs:
spec = s
if not isinstance(spec, dict):
            spec = yaml.safe_load(spec)  # safe_load avoids arbitrary object construction
name = spec["metadata"]["name"]
namespace = spec["metadata"]["namespace"]
kind = spec["kind"]
kind_snake = camel_to_snake(kind)
plural = spec["kind"].lower() + "s"
result = None
if not "/" in spec["apiVersion"]:
group = None
else:
group, version = spec["apiVersion"].split("/", 1)
if group is None or group.lower() == "apps":
if group is None:
api = k8s_client.CoreV1Api()
else:
api = k8s_client.AppsV1Api()
create_method_name = f"create_namespaced_{kind_snake}"
create_method_args = [namespace, spec]
replace_method_name = f"delete_namespaced_{kind_snake}"
replace_method_args = [name, namespace]
else:
api = k8s_client.CustomObjectsApi()
create_method_name = f"create_namespaced_custom_object"
create_method = getattr(api, create_method_name)
create_method_args = [group, version, namespace, plural, spec]
delete_options = k8s_client.V1DeleteOptions()
replace_method_name = f"delete_namespaced_custom_object"
replace_method_args = [group, version, namespace, plural, name, delete_options]
create_method = getattr(api, create_method_name)
replace_method = getattr(api, replace_method_name)
if mode in [K8S_CREATE, K8S_CREATE_OR_REPLACE]:
try:
result = create_method(*create_method_args)
result_namespace, result_name = _get_result_name(result)
logging.info(f"Created {kind} {result_namespace}.{result_name}")
results.append(result)
continue
except k8s_rest.ApiException as e:
# 409 is conflict indicates resource already exists
if e.status == 409 and mode == K8S_CREATE_OR_REPLACE:
pass
else:
raise
# Using replace didn't work for virtualservices so we explicitly delete
# and then issue a create
result = replace_method(*replace_method_args)
logging.info(f"Deleted {kind} {namespace}.{name}")
result = create_method(*create_method_args)
result_namespace, result_name = _get_result_name(result)
logging.info(f"Created {kind} {result_namespace}.{result_name}")
# Now recreate it
results.append(result)
return results | 6420b08b8198ba59594c165b973b97161dd4bac3 | 3,650,949 |
import numpy as np
def local_coherence(Q, ds=1):
""" estimate the local coherence of a spectrum
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
array with cross-spectrum, with centered coordinate frame
ds : integer, default=1
kernel radius to describe the neighborhood
Returns
-------
M : numpy.array, size=(m,n), dtype=float
vector coherence from no to ideal, i.e.: 0...1
See Also
--------
thresh_masking
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ..generic.test_tools import create_sample_image_pair
>>> # create cross-spectrum with random displacement
>>> im1,im2,_,_,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> spec1,spec2 = np.fft.fft2(im1), np.fft.fft2(im2)
>>> Q = spec1 * np.conjugate(spec2)
>>> Q = normalize_spectrum(Q)
>>> Q = np.fft.fftshift(Q) # transform to centered grid
>>> C = local_coherence(Q)
    >>> plt.imshow(C, cmap='OrRd'), plt.colorbar(), plt.show()
    >>> plt.imshow(Q, cmap='twilight'), plt.colorbar(), plt.show()
"""
assert type(Q) == np.ndarray, ("please provide an array")
diam = 2 * ds + 1
C = np.zeros_like(Q)
(isteps, jsteps) = np.meshgrid(np.linspace(-ds, +ds, 2 * ds + 1, dtype=int), \
np.linspace(-ds, +ds, 2 * ds + 1, dtype=int))
IN = np.ones(diam ** 2, dtype=bool)
IN[diam ** 2 // 2] = False
isteps, jsteps = isteps.flatten()[IN], jsteps.flatten()[IN]
for idx, istep in enumerate(isteps):
jstep = jsteps[idx]
Q_step = np.roll(Q, (istep, jstep))
# if the spectrum is normalized, then no division is needed
C += Q * np.conj(Q_step)
C = np.abs(C) / np.sum(IN)
return C | deb0d52e6852e02e1a92e64a7979585b888753f7 | 3,650,950 |
def find_best_lexer(text, min_confidence=0.85):
"""
Like the built in pygments guess_lexer, except has a minimum confidence
level. If that is not met, it falls back to plain text to avoid bad
highlighting.
:returns: Lexer instance
"""
current_best_confidence = 0.0
current_best_lexer = None
for lexer in _iter_lexerclasses():
confidence = lexer.analyse_text(text)
if confidence == 1.0:
return lexer()
elif confidence > current_best_confidence:
current_best_confidence = confidence
current_best_lexer = lexer
if current_best_confidence >= min_confidence:
return current_best_lexer()
else:
return TextLexer() | 57cffae3385886cc7841086697ce30ff10bb3bd8 | 3,650,951 |
def volta(contador, quantidade):
"""
    Moves back a given number of characters.
    :param contador: integer used to mark a position in the string
    :param quantidade: integer giving how many positions to move back in the string
    :type contador: int
    :type quantidade: int
    :return: returns the new counter
    :rtype: int
"""
return contador - quantidade | 4183afebdfc5273c05563e4675ad5909124a683a | 3,650,952 |
from sqlalchemy import and_, func, update
def keep_room(session, worker_id, room_id):
"""Try to keep a room"""
# Update room current timestamp
query = update(
Room
).values({
Room.updated: func.now(),
}).where(
and_(Room.worker == worker_id,
Room.id == room_id)
)
proxy = session.execute(query)
session.commit()
return proxy.rowcount == 1 | b4dbbc972d7fd297bf55b205e92d2126a5a68e6e | 3,650,953 |
from typing import List
def get_rounds(number: int) -> List[int]:
"""
:param number: int - current round number.
:return: list - current round and the two that follow.
"""
return list(range(number, number + 3)) | 9bf55545404acd21985c1765906fc439f5f4aed6 | 3,650,954 |
from bs4 import BeautifulSoup
import datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def parse_pasinobet(url):
"""
    Returns the odds available on Pasinobet.
"""
selenium_init.DRIVER["pasinobet"].get("about:blank")
selenium_init.DRIVER["pasinobet"].get(url)
match_odds_hash = {}
match = None
date_time = None
WebDriverWait(selenium_init.DRIVER["pasinobet"], 15).until(
EC.invisibility_of_element_located(
(By.CLASS_NAME, "skeleton-line")) or sportsbetting.ABORT
)
if sportsbetting.ABORT:
raise sportsbetting.AbortException
inner_html = selenium_init.DRIVER["pasinobet"].execute_script(
"return document.body.innerHTML")
soup = BeautifulSoup(inner_html, features="lxml")
date = ""
for line in soup.findAll():
if sportsbetting.ABORT:
raise sportsbetting.AbortException
if "class" in line.attrs and "category-date" in line["class"]:
date = line.text.lower()
date = date.replace("nov", "novembre")
date = date.replace("déc", "décembre")
if "class" in line.attrs and "event-title" in line["class"]:
match = " - ".join(map(lambda x: list(x.stripped_strings)[0],
line.findChildren("div", {"class": "teams-container"})))
if "class" in line.attrs and "time" in line["class"]:
try:
date_time = datetime.datetime.strptime(
date+line.text.strip(), "%A, %d %B %Y%H:%M")
except ValueError:
date_time = "undefined"
if "class" in line.attrs and "event-list" in line["class"]:
if "---" not in list(line.stripped_strings):
odds = list(map(float, line.stripped_strings))
match_odds_hash[match] = {}
match_odds_hash[match]["date"] = date_time
match_odds_hash[match]["odds"] = {"pasinobet": odds}
return match_odds_hash | 5cda34741f4e6cc26e2ecccec877c9af2426084a | 3,650,955 |
from qtpy.QtWidgets import QToolButton  # Qt binding assumed; PyQt5/PySide2 equivalents work the same
def create_toolbutton(parent, icon=None, tip=None, triggered=None):
"""Create a QToolButton."""
button = QToolButton(parent)
if icon is not None:
button.setIcon(icon)
if tip is not None:
button.setToolTip(tip)
if triggered is not None:
button.clicked.connect(triggered)
return button | dfff516f498f924ca5d5d6b15d94907ed2e06029 | 3,650,956 |
import pandas as pd
from sqlalchemy import select
# CONN is assumed to be a module-level SQLAlchemy engine or connection.
def __basic_query(model, verbose: bool = False) -> pd.DataFrame:
"""Execute and return basic query."""
stmt = select(model)
if verbose:
print(stmt)
return pd.read_sql(stmt, con=CONN, index_col="id") | eb9c44eb64144b1e98e310e2dd026e5b1e912619 | 3,650,957 |
import numpy as np
def format_data_preprocessed(data, dtype=np.float64):
"""
    Pre-processes the input data frame into train and validation arrays.
    data    the input data frame
    dtype   the data type for the output ndarrays (Default: np.float64)
"""
train_flag = np.array(data['train_flag'])
    print('Formatting input data, size: %d' % len(train_flag))
# outputs, nans excluded
y = data.loc[ :,'y1':'y3']
# replace nans with 0
y.fillna(0, inplace=True)
# collect only train data
ytr = np.array(y)[train_flag]
# collect only validation data
yvl = np.array(y)[~train_flag]
    print('Train data outputs collected, size: %d' % len(ytr))
    print('\n\nData before encoding\n\n%s' % data.describe())
# dropping target and synthetic columns
data.drop(['y1','y2','y3','train_flag', 'COVAR_y1_MISSING', 'COVAR_y2_MISSING', 'COVAR_y3_MISSING'], axis=1, inplace=True)
    print('\n\nData after encoding\n\n%s' % data.describe())
# split into training and test
X = np.array(data).astype(dtype)
Xtr = X[train_flag]
Xvl = X[~train_flag]
#print 'Train data first: %s' % (Xtr[0])
#print 'Evaluate data first: %s' % (Xvl[0])
return Xtr, ytr, Xvl, yvl | a5785ef81a0f5d35f8fb73f72fbe55084bc5e2b0 | 3,650,958 |
def route_yo(oauth_client=None):
"""Sends a Yo!
We can defer sender lookup to the Yo class since it should be obtained
from the request context. Requiring an authenticated user reduces the
likelihood of accidental impersonation of senders.
Creating pseudo users is handled here. It should be limited to only
users on the app, as soon as we figure out how to do that.
"""
if 'polls' in request.user_agent.string.lower():
return route_polls_reply()
if 'status' in request.user_agent.string.lower():
return status.route_reply()
# TODO: since we weren't recording udids at signup
# record it here if provided. In the future this needs
# to be removed as it can pose a security risk.
user = g.identity.user
phone = request.json.get('phone_number')
recipients = request.json.get('to') or request.json.get('username')
if phone and not recipients:
to_user = upsert_pseudo_user(phone)
recipients = to_user.username if to_user else None
form_args = {'context': request.json.get('context') or None,
'header': request.json.get('header') or None,
'link': request.json.get('link') or None,
'location': request.json.get('location') or None,
'recipients': recipients,
'sound': request.json.get('sound'),
'yo_id': request.json.get('yo_id') or None
}
form = SendYoForm.from_json(form_args)
form.validate()
cover = request.json.get('cover')
photo = request.json.get('photo')
context_id = request.json.get('context_identifier')
reply_to = request.json.get('reply_to')
response_pair = request.json.get('response_pair')
text = request.json.get('text')
left_link = request.json.get('left_link')
right_link = request.json.get('right_link')
is_poll = request.json.get('is_poll')
region_name = request.json.get('region_name')
is_push_only = request.json.get('is_push_only')
if request.headers.get('X-APP-ID'):
app_id = request.headers.get('X-APP-ID')
sound = 'no.mp3'
else:
app_id = 'co.justyo.yoapp'
sound = form.sound.data
yo = send_yo(sender=user, recipients=form.recipients.data,
sound=sound, link=form.link.data,
location=form.location.data, header=form.header.data,
yo_id=form.yo_id.data, context=form.context.data,
cover=cover, photo=photo, context_id=context_id,
reply_to=reply_to, response_pair=response_pair,
oauth_client=oauth_client, text=text,
left_link=left_link, right_link=right_link,
is_poll=is_poll, region_name=region_name,
app_id=app_id, is_push_only=is_push_only)
contact, is_first_yo = upsert_yo_contact(yo)
#if context_id:
# mixpanel_yoapp.track(yo.recipient.user_id, 'Yo Sent', {'Type': context_id})
# Send response yo if needed.
# NOTE: By leaving this as-is groups are allowed to send
# welcome links.
if reply_to is None:
if is_first_yo and yo.recipient.welcome_link:
send_response_yo.delay(yo.yo_id, use_welcome_link=True)
elif yo.should_trigger_response():
send_response_yo.delay(yo.yo_id)
response = {'success': True, 'yo_id': yo.yo_id}
if yo.recipient:
recipient_dict = yo.recipient.get_public_dict(contact.get_name())
response.update({'recipient': recipient_dict})
if yo.not_on_yo:
response.update({'not_on_yo': yo.not_on_yo})
return make_json_response(response) | c729da43a9d4c2ed86c6e9a2e3ddb187b4af6eef | 3,650,959 |
def get_word_idxs_1d(context, token_seq, char_start_idx, char_end_idx):
"""
0 based
:param context:
:param token_seq:
:param char_start_idx:
:param char_end_idx:
:return: 0-based token index sequence in the tokenized context.
"""
spans = get_1d_spans(context,token_seq)
idxs = []
for wordIdx, span in enumerate(spans):
if not (char_end_idx <= span[0] or char_start_idx >= span[1]):
idxs.append(wordIdx)
assert len(idxs) > 0, "{} {} {} {}".format(context, token_seq, char_start_idx, char_end_idx)
return idxs | b279a3baea0e9646b55e598fd6ae16df70de5100 | 3,650,960 |
import binascii
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
def create_b64_from_private_key(private_key: X25519PrivateKey) -> bytes:
"""Create b64 ascii string from private key object"""
private_bytes = private_key_to_bytes(private_key)
b64_bytes = binascii.b2a_base64(private_bytes, newline=False)
return b64_bytes | 3abd69bcd3fc254c94da9fac446c6ffbc462f58d | 3,650,961 |
def create_fake_record(filename):
"""Create records for demo purposes."""
data_to_use = _load_json(filename)
data_acces = {
"access_right": fake_access_right(),
"embargo_date": fake_feature_date(),
}
service = Marc21RecordService()
draft = service.create(
data=data_to_use, identity=system_identity(), access=data_acces
)
record = service.publish(id_=draft.id, identity=system_identity())
return record | 744ed3a3b13bc27d576a31d565d846850e6640a3 | 3,650,962 |
import json
def load_configuration():
"""
    This function loads the configuration from the
    CONFIG.json file and then returns it.
Returns: The configuration
"""
with open('CONFIG.json', 'r') as f:
return json.load(f) | 91eae50d84ec9e4654ed9b8bcfa35215c8b6a7c2 | 3,650,963 |
import configparser
import os
import sys
def config_parse(profile_name):
"""Parse the profile entered with the command line. This profile is in the profile.cfg file.
These parameters are used to automate the processing
:param profile_name: Profile's name"""
config = configparser.ConfigParser()
config.read(os.path.dirname(sys.argv[0]) + "\\profile.cfg")
folder_string = config.get(profile_name, "folder_names")
folder_string = [i.strip() for i in folder_string.split(",")]
cam_names = config.get(profile_name, "cam_names")
cam_names = [i.strip() for i in cam_names.split(",")]
cam_bearing = config.get(profile_name, "cam_bearing")
cam_bearing = [int(i.strip()) for i in cam_bearing.split(",")]
cam_log_count = int(config.get(profile_name, "cam_log_count"))
distance_from_center = float(config.get(profile_name, "distance_from_center"))
min_pic_distance = float(config.get(profile_name, "min_pic_distance"))
    try:
        cam_log_position = config.get(profile_name, "cam_log_position")
        cam_log_position = [int(i.strip()) for i in cam_log_position.split(",")]
    except configparser.NoOptionError:
        cam_log_position = list(range(len(cam_names)))
return folder_string, cam_names, cam_log_position, cam_bearing, cam_log_count, distance_from_center, min_pic_distance | 45e56c4a5d55b46d11bf9064c6b72fed55ffa4c9 | 3,650,964 |
import bs4 as bs
from urllib.request import Request, urlopen
def scrape(webpage, linkNumber, extention):
"""
scrapes the main page of a news website using request and beautiful soup and
returns the URL link to the top article as a string
Args:
webpage: a string containing the URL of the main website
linkNumber: an integer pointing to the URL of the top article from the list
        of all the URLs that have been scraped
extention: a string containing the suffix of the URL to be sent to the
function sub_soup()
returns:
        headline: a string containing the 500-word summary of the scraped article
"""
# returns the link to the top headline link
req = Request(webpage, headers={'User-Agent':'Mozilla/5.0'})
webpage = urlopen(req).read()
soup = bs.BeautifulSoup(webpage,'lxml')
link = soup.find_all('a')
if linkNumber > 0:
story = (link[linkNumber])
sub_soup = str(extention + '{}'.format(story['href']))
elif linkNumber == -1:
sub_soup = articles[0][5]
elif linkNumber == -2:
link = soup.find('a',{'class':'gs-c-promo-heading'})
sub_soup = 'https://www.bbc.co.uk{}'.format(link['href'])
headline = sub_scrape(sub_soup)
return headline | f04cb8c8583f7f242ce70ec4da3e8f2556af7edb | 3,650,965 |
def Scheduler(type):
"""Instantiate the appropriate scheduler class for given type.
Args:
type (str): Identifier for batch scheduler type.
Returns:
Instance of a _BatchScheduler for given type.
"""
for cls in _BatchScheduler.__subclasses__():
if cls.is_scheduler_for(type):
return cls(type)
    raise ValueError('Unknown batch scheduler type: %s' % type) | 21074ecf33383b9f769e8dd63786194b4678246b | 3,650,966
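# Illustrative sketch (an assumption, not part of the original module): the factory above
# expects a module-level _BatchScheduler base class whose subclasses implement a
# classmethod is_scheduler_for(type) and accept the type string in __init__.
class _BatchScheduler:
    def __init__(self, type):
        self.type = type
class _SlurmScheduler(_BatchScheduler):
    @classmethod
    def is_scheduler_for(cls, type):
        # Claim the "slurm" identifier; Scheduler("slurm") then returns an instance of this class.
        return type == "slurm"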
from django.shortcuts import get_object_or_404, render
def event_detail(request, event_id):
"""
A View to return an individual selected
event details page.
"""
event = get_object_or_404(Event, pk=event_id)
context = {
'event': event
}
return render(request, 'events/event_detail.html', context) | 6fda0906e70d88839fbcd26aa6724b5f2c433c07 | 3,650,967 |
from typing import Optional
from typing import Iterable
from typing import Tuple
import numpy
def variables(
metadata: meta.Dataset,
selected_variables: Optional[Iterable[str]] = None
) -> Tuple[dataset.Variable]:
"""Return the variables defined in the dataset.
Args:
selected_variables: The variables to return. If None, all the
variables are returned.
Returns:
The variables defined in the dataset.
"""
selected_variables = selected_variables or metadata.variables.keys()
return tuple(
dataset.VariableArray(
v.name,
numpy.ndarray((0, ) * len(v.dimensions), v.dtype),
v.dimensions,
attrs=v.attrs,
compressor=v.compressor,
fill_value=v.fill_value,
filters=v.filters,
) for k, v in metadata.variables.items() if k in selected_variables) | 6175ad712996a30673eb2f5ff8b64c76d2f4a66b | 3,650,968 |
def builder(tiledata, start_tile_id, version, clear_old_tiles=True):
"""
Deserialize a list of serialized tiles, then re-link all the tiles to
re-create the map described by the tile links
:param list tiledata: list of serialized tiles
:param start_tile_id: tile ID of tile that should be used as the start tile
:param str version: object model version of the tile data to be deserialized
:return: starting tile of built map
:rtype: text_game_maker.tile.tile.Tile
"""
tiles = {}
visited = []
if clear_old_tiles:
_tiles.clear()
for d in tiledata:
tile = deserialize(d, version)
tiles[tile.tile_id] = tile
if start_tile_id not in tiles:
raise RuntimeError("No tile found with ID '%s'" % start_tile_id)
tilestack = [tiles[start_tile_id]]
while tilestack:
t = tilestack.pop(0)
if t.tile_id in visited:
continue
visited.append(t.tile_id)
if isinstance(t, LockedDoor) and t.replacement_tile:
if t.replacement_tile:
t.replacement_tile = tiles[t.replacement_tile]
tilestack.append(t.replacement_tile)
if t.source_tile:
t.source_tile = tiles[t.source_tile]
tilestack.append(t.source_tile)
else:
for direction in ['north', 'south', 'east', 'west']:
tile_id = getattr(t, direction)
if not tile_id:
continue
setattr(t, direction, tiles[tile_id])
tilestack.append(tiles[tile_id])
return tiles[start_tile_id] | 235df5c953705fbbbd69d8f1c7ed1ad282b469ba | 3,650,969 |
import base64
def data_uri(content_type, data):
"""Return data as a data: URI scheme"""
return "data:%s;base64,%s" % (content_type, base64.urlsafe_b64encode(data)) | f890dc1310e708747c74337f5cfa2d6a31a23fc0 | 3,650,970 |
def next_line(ionex_file):
"""
next_line
Function returns the next line in the file
that is not a blank line, unless the line is
'', which is a typical EOF marker.
"""
done = False
while not done:
line = ionex_file.readline()
if line == '':
return line
elif line.strip():
return line | 053e5582e5146ef096d743973ea7069f19ae6d4d | 3,650,971 |
def last(value):
"""
returns the last value in a list (None if empty list) or the original if value not a list
:Example:
---------
>>> assert last(5) == 5
>>> assert last([5,5]) == 5
>>> assert last([]) is None
>>> assert last([1,2]) == 2
"""
values = as_list(value)
return values[-1] if len(values) else None | f3a04f0e2544879639b53012bbd9068ae205be18 | 3,650,972 |
import numpy
def levup(acur, knxt, ecur=None):
"""
LEVUP One step forward Levinson recursion
Args:
acur (array) :
knxt (array) :
Returns:
anxt (array) : the P+1'th order prediction polynomial based on the P'th
order prediction polynomial, acur, and the P+1'th order
reflection coefficient, Knxt.
enxt (array) : the P+1'th order prediction prediction error, based on the
P'th order prediction error, ecur.
References:
P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.
"""
if acur[0] != 1:
raise ValueError(
'At least one of the reflection coefficients is equal to one.')
acur = acur[1:] # Drop the leading 1, it is not needed
# Matrix formulation from Stoica is used to avoid looping
anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate(
(numpy.conj(acur[-1::-1]), [1]))
enxt = None
if ecur is not None:
# matlab version enxt = (1-knxt'.*knxt)*ecur
enxt = (1. - numpy.dot(numpy.conj(knxt), knxt)) * ecur
anxt = numpy.insert(anxt, 0, 1)
return anxt, enxt | 182102d03369d23d53d21bae7209cf49d2caecb4 | 3,650,973 |
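# Usage sketch: step a first-order predictor up with reflection coefficient k = 0.3;
# the prediction error shrinks by the factor (1 - k**2).
anxt, enxt = levup(numpy.array([1.0, 0.5]), 0.3, ecur=2.0)
# anxt -> [1.0, 0.65, 0.3], enxt -> 1.82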
# Keras backend import assumed; K.gradients requires TF1-style graph mode
# (with tf.keras, use `from tensorflow.keras import backend as K`).
from keras import backend as K
def gradient_output_wrt_input(model, img, normalization_trick=False):
"""
Get gradient of softmax with respect to the input.
Must check if correct.
Do not use
# Arguments
model:
img:
# Returns
gradient:
"""
grads = K.gradients(model.output, model.input)[0]
if normalization_trick:
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
iterate = K.function([model.input], [grads])
grad_vals = iterate([img])[0]
gradient = grad_vals[0]
return gradient | ed45fccb0f412f8f8874cd8cd7f62ff2101a3a40 | 3,650,974 |
def response_GET(client, url):
"""Fixture that return the result of a GET request."""
return client.get(url) | b4762c9f652e714cc5c3694b75f935077039cb02 | 3,650,975 |
import sys
def load_python_object(name):
"""
Loads a python module from string
"""
logger = getLoggerWithNullHandler('commando.load_python_object')
(module_name, _, object_name) = name.rpartition(".")
if module_name == '':
(module_name, object_name) = (object_name, module_name)
try:
logger.debug('Loading module [%s]' % module_name)
module = __import__(module_name)
except ImportError:
raise CommandoLoaderException(
"Module [%s] cannot be loaded." % module_name)
if object_name == '':
return module
try:
module = sys.modules[module_name]
except KeyError:
raise CommandoLoaderException(
"Error occured when loading module [%s]" % module_name)
try:
logger.debug('Getting object [%s] from module [%s]' %
(object_name, module_name))
return getattr(module, object_name)
except AttributeError:
raise CommandoLoaderException(
"Cannot load object [%s]. "
"Module [%s] does not contain object [%s]. "
"Please fix the configuration or "
"ensure that the module is installed properly" % (
name,
module_name,
object_name)) | ba8db72c56929560b72de8330a3f703f96613763 | 3,650,976 |
from tqdm import tqdm
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
def twitter_preprocess():
"""
ekphrasis-social tokenizer sentence preprocessor.
Substitutes a series of terms by special coins when called
over an iterable (dataset)
"""
norm = ['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'date', 'number']
ann = {"hashtag", "elongated", "allcaps", "repeated",
"emphasis", "censored"}
preprocessor = TextPreProcessor(
normalize=norm,
annotate=ann,
all_caps_tag="wrap",
fix_text=True,
segmenter="twitter_2018",
corrector="twitter_2018",
unpack_hashtags=True,
unpack_contractions=True,
spell_correct_elong=False,
tokenizer=SocialTokenizer(lowercase=True).tokenize,
dicts=[emoticons]).pre_process_doc
def preprocess(name, dataset):
description = " Ekphrasis-based preprocessing dataset "
description += "{}...".format(name)
data = [preprocessor(x) for x in tqdm(dataset, desc=description)]
return data
return preprocess | 18bcd48cff7c77480cd76165fef02d0e39ae19cc | 3,650,977 |
import math
import numpy as np
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac), 0],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab), 0],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc, 0],
[0,0,0,1]]) | cd940b60096fa0c92b8cd04d36a0d62d7cd46455 | 3,650,978 |
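# Usage sketch: rotating the point (1, 0, 0) by 90 degrees about the z-axis gives
# (0, 1, 0); the matrix is 4x4, so points are written in homogeneous coordinates.
R = rotation_matrix([0, 0, 1], math.pi / 2)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0, 1.0]), [0.0, 1.0, 0.0, 1.0])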
from typing import Type
from typing import List
def get_routes(interface: Type[Interface]) -> List[ParametrizedRoute]:
"""
Retrieves the routes from an interface.
"""
if not issubclass(interface, Interface):
raise TypeError('expected Interface subclass, got {}'
.format(interface.__name__))
routes = []
for member in interface.members():
if isinstance(member, _InterfaceMethod):
route_data = getattr(member.original, '__route__', None)
if route_data is not None:
assert isinstance(route_data, RouteData)
routes.append(ParametrizedRoute.from_function(
route_data, interface, member.original))
return routes | 9d3baf951312d3027e2329fa635b2425dda579e5 | 3,650,979 |
import os
import logging
import gzip
import numpy as np
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
"""A generic function to load mnist-like dataset.
Parameters:
----------
shape : tuple
The shape of digit images.
path : str
The path that the data is downloaded to.
name : str
The dataset name you want to use(the default is 'mnist').
url : str
The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').
"""
path = os.path.join(path, name)
# Define functions for loading mnist-like data's images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
logging.info(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
logging.info("Load or Download {0} > {1}".format(name.upper(), path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test | a26fca118fe5ca146356b2fa949e1e346640ea46 | 3,650,980 |
def _get_realm(response):
"""Return authentication realm requested by server for 'Basic' type or None
:param response: requests.response
:type response: requests.Response
:returns: realm
:rtype: str | None
"""
if 'www-authenticate' in response.headers:
auths = response.headers['www-authenticate'].split(',')
basic_realm = next((auth_type for auth_type in auths
if auth_type.rstrip().lower().startswith("basic")),
None)
if basic_realm:
realm = basic_realm.split('=')[-1].strip(' \'\"').lower()
return realm
else:
return None
else:
return None | 346b3278eb52b565f747c952493c15820eece729 | 3,650,981 |
import math
def exp_mantissa(num, base=10):
"""Returns e, m such that x = mb^e"""
if num == 0:
return 1, 0
# avoid floating point error eg log(1e3, 10) = 2.99...
exp = math.log(abs(num), base)
exp = round(exp, FLOATING_POINT_ERROR_ON_LOG_TENXPONENTS)
exp = math.floor(exp) # 1 <= mantissa < 10
mantissa = num / (base**exp)
return exp, mantissa | b0fd7a961fbd0f796fc00a5ce4005c7aa9f92950 | 3,650,982 |
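# Worked examples (assuming the module-level rounding constant used above is defined):
# exp_mantissa(1500) -> (3, 1.5) because 1500 = 1.5 * 10**3, and
# exp_mantissa(0.25, base=2) -> (-2, 1.0) because 0.25 = 1.0 * 2**-2.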
from typing import Callable
def decide_if_taxed(n_taxed: set[str]) -> Callable[[str], bool]:
"""To create an decider function for omitting taxation.
Args:
n_taxed: The set containing all items, which should not be taxed.
If empty, a default set will be chosen.
Returns:
Decider function for omitting taxation.
"""
local_set = _D_TAX_E
if n_taxed:
local_set = n_taxed
def _decide_if_taxed(in_str: str, /) -> bool:
"""To check whether an item is taxed or not.
A very simple function, which look up the item in a
given set. This set contains all item names, which should omitted
from taxation.
Args:
in_str: The name of the purchased item, which should be checked for taxation.
Returns:
Whether the item is taxed or not.
"""
for item_sub_name in in_str.split(" "):
if item_sub_name in local_set:
return False
return True
return _decide_if_taxed | c13c7e832b86bd85e2cade03cbc84a43893dfe17 | 3,650,983 |
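# Usage sketch (assumes the module-level default set _D_TAX_E exists): build a decider
# that exempts books and chocolate, then apply it to each purchased item's name.
# is_taxed = decide_if_taxed({"book", "chocolate"})
# is_taxed("imported box of chocolate")  -> False (contains an exempt word)
# is_taxed("bottle of perfume")          -> True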
from plotly.graph_objs import Heatmap
def generate_two_cat_relation_heat_map():
"""
A correlation matrix for categories
"""
data = Heatmap(
z=df_categories.corr(),
y=df_categories.columns,
x=df_categories.columns)
title = 'Correlation Distribution of Categories'
y_title = 'Category'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title) | 90efbffd54c723eef9297ba0abba71d55a500cd0 | 3,650,984 |
import numpy as np
from scipy import sparse
def build_phase2(VS, FS, NS, VT, VTN, marker, wc):
"""
    Build the phase-2 sparse matrix M_P2 (closest valid point term) for a source mesh
    with nS vertices and mS triangles and a target mesh with nT vertices.
:param VS: deformed source mesh from previous step nS x 3
:param FS: triangle index of source mesh mS * 3
:param NS: triangle normals of source mesh mS * 3
:param VT: target mesh nT * 3
:param VTN: Vertex normals of source mesh nT * 3
:param marker: marker constraint
:param wc: weight value
:return: M_P2: (3 * nS) x (3 * (nS + mS)) big sparse matrix
C_P2: (3 * nS) matrix
"""
VSN = calc_vertex_norm(FS, NS)
S_size = VS.shape[0]
valid_pt = np.zeros((S_size, 2))
C_P2 = np.zeros((3*S_size, 1))
for j in range(0, S_size):
if len(np.where(marker[:, 0]-1 == j)[0]) != 0:
valid_pt[j, :] = np.array([j, marker[marker[:, 0]-1 == j, 1] - 1], dtype=np.int32)
else:
valid_pt[j, :] = np.array([j, find_closest_validpt(VS[j, :], VSN[j, :], VT, VTN)], dtype=np.int32)
C_P2[np.linspace(0, 2, 3, dtype=np.int32) + j*3, 0] = wc * VT[int(valid_pt[j, 1]), :].T
M_P2 = sparse.coo_matrix((np.tile(wc, [3*S_size, 1])[:, 0], (np.arange(0, 3*S_size), np.arange(0, 3*S_size))), shape=(3*S_size, 3*(VS.shape[0]+FS.shape[0])))
return M_P2, C_P2 | ab3622f5b4377b1a60d34345d5396f66d5e3c641 | 3,650,985 |
import numpy as np
def voronoi_to_dist(voronoi):
""" voronoi is encoded """
def decoded_nonstacked(p):
return np.right_shift(p, 20) & 1023, np.right_shift(p, 10) & 1023, p & 1023
x_i, y_i, z_i = np.indices(voronoi.shape)
x_v, y_v, z_v = decoded_nonstacked(voronoi)
return np.sqrt((x_v - x_i) ** 2 + (y_v - y_i) ** 2 + (z_v - z_i) ** 2) | 38c2630d45b281477531fcc845d34ea7b2980dab | 3,650,986 |
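# Usage sketch: each voxel stores its nearest site's (x, y, z) packed as
# (x << 20) | (y << 10) | z. Here every voxel in a 2x2x2 grid points to site (1, 1, 1),
# so the returned values are the Euclidean distances to that corner.
site = (1 << 20) | (1 << 10) | 1
dist = voronoi_to_dist(np.full((2, 2, 2), site, dtype=np.int64))
# dist[1, 1, 1] == 0.0 and dist[0, 0, 0] == sqrt(3)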
def post_update_view(request):
"""View To Update A Post For Logged In Users"""
if request.method == 'POST':
token_type, token = request.META.get('HTTP_AUTHORIZATION').split()
if(token_type != 'JWT'):
return Response({'detail': 'No JWT Authentication Token Found'}, status=status.HTTP_400_BAD_REQUEST)
token_data = {'token': token}
try:
valid_data = VerifyJSONWebTokenSerializer().validate(token_data)
logged_in_user = valid_data.get('user')
        except Exception:
return Response({'detail': 'Invalid Token'}, status.HTTP_400_BAD_REQUEST)
updated_data = request.data
instance = Post.objects.get(slug=updated_data.get('slug'))
admin_user = User.objects.get(pk=1) # PK Of Admin User Is 1
if(instance.author == logged_in_user or logged_in_user == admin_user):
updated_data.pop('slug')
serializer = PostUpdateSerializer(instance, data=updated_data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
return Response({'detail': 'Something Went Wrong.'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN)
else:
return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN) | 8044e12328c5bb63c48f673971ae1ed8727b02b7 | 3,650,987 |
from typing import List
def _is_binary_classification(class_list: List[str]) -> bool:
"""Returns true for binary classification problems."""
if not class_list:
return False
return len(class_list) == 1 | 82ada7dd8df93d58fad489b19b9bf4a93ee819c3 | 3,650,988 |
def create_post_like(author, post):
"""
Create a new post like given an author and post
"""
return models.Like.objects.create(author=author, post=post) | f8e07c10076015e005cd62bb3b39a5656ebc45a3 | 3,650,989 |
def translate_entries(yamldoc, base_url):
"""
Reads the field `entries` from the YAML document, processes each entry that is read using the
given base_url, and appends them all to a list of processed entries that is then returned.
"""
if 'entries' in yamldoc and type(yamldoc['entries']) is list:
entries = []
for i, entry in enumerate(yamldoc['entries']):
entries.append(process_entry(base_url, i, entry))
        return entries
    return [] | 0c949939020b3bb1017fca5543be8dcc77d03bbf | 3,650,990
def get_in(obj, lookup, default=None):
""" Walk obj via __getitem__ for each lookup,
returning the final value of the lookup or default.
"""
tmp = obj
for l in lookup:
try: # pragma: no cover
tmp = tmp[l]
except (KeyError, IndexError, TypeError): # pragma: no cover
return default
return tmp | 73dfcaadb6936304baa3471f1d1e980f815a7057 | 3,650,991 |
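# Usage sketch: walk a nested structure with a mixed key/index lookup path; a missing
# step falls back to the default instead of raising.
assert get_in({"a": [{"b": 42}]}, ["a", 0, "b"]) == 42
assert get_in({"a": []}, ["a", 3], default="n/a") == "n/a"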
import six
def GetSpec(resource_type, message_classes, api_version):
"""Returns a Spec for the given resource type."""
spec = _GetSpecsForVersion(api_version)
if resource_type not in spec:
raise KeyError('"%s" not found in Specs for version "%s"' %
(resource_type, api_version))
spec = spec[resource_type]
table_cols = []
for name, action in spec.table_cols:
if isinstance(action, six.string_types):
table_cols.append((name, property_selector.PropertyGetter(action)))
elif callable(action):
table_cols.append((name, action))
else:
raise ValueError('expected function or property in table_cols list: {0}'
.format(spec))
message_class = getattr(message_classes, spec.message_class_name)
fields = list(_ProtobufDefinitionToFields(message_class))
return Spec(message_class=message_class,
fields=fields,
table_cols=table_cols,
transformations=spec.transformations,
editables=spec.editables) | ece9dd996c52f01bb985af9529b33bb7b12fbfdc | 3,650,992 |
def ips_between(start: str, end: str) -> int:
"""
A function that receives two IPv4 addresses,
and returns the number of addresses between
them (including the first one, excluding the
last one).
All inputs will be valid IPv4 addresses in
the form of strings. The last address will
always be greater than the first one.
:param start:
:param end:
:return:
"""
ip_start = [int(a) for a in start.split('.')]
ip_end = [int(b) for b in end.split('.')]
ips = zip(ip_start, ip_end)
ips_range = [0, 0, 0, 0]
for ip_id, ip in enumerate(ips):
calc_ip_range(ip, ip_id, ips_range)
return calc_result(ips_range) | aa523ec8a127e2224b7c9fc7a67d720ac4d100ed | 3,650,993 |
def tmNstate(trTrg):
"""Given (newq, new_tape_sym, dir),
return newq.
"""
return trTrg[0] | 17db0bc5cae4467e7a66d506e1f32d48c949e5eb | 3,650,994 |
import argparse
def parse_arguments(args):
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description= "Create UniRef database for HUMAnN2\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"-v","--verbose",
help="additional output is printed\n",
action="store_true",
default=False)
parser.add_argument(
"-i","--input",
help="the UniRef fasta file to read\n",
required=True)
parser.add_argument(
"-o","--output",
help="the UniRef database to write\n",
required=True)
parser.add_argument(
"-f","--filter",
help="string to use for filtering (example: uncharacterized)\n")
parser.add_argument(
"--exclude-list",
help="file of id list to use for filtering (example: id_list.tsv)\n")
parser.add_argument(
"--include-list",
help="file of id list to use for filtering (example: id_list.tsv)\n")
parser.add_argument(
"-d","--format-database",
choices=["fasta","rapsearch","diamond"],
default="fasta",
help="format of output files (default: fasta)\n")
return parser.parse_args() | d547c0017904d91a930f149a6b085d2e0c87fe88 | 3,650,995 |
def _preprocess_continuous_variable(df: pd.DataFrame, var_col: str, bins: int,
min_val: float = None,
max_val: float = None) -> pd.DataFrame:
"""
Pre-processing the histogram for continuous variables by splitting the variable in buckets.
:param df: (pd.DataFrame) Data frame containing at least the continuous variable
:param var_col: (str) Name of the continuous variable
:param bins: (int) Preferred number of bins in histogram
:param min_val: (float, optional) Minimal value to be taken by the variable (if other than the minimum observed in
the data.
:param max_val: (float, optional) Maximal value to be taken by the variable (if other than the maximum observed in
the data.
:return: pd.DataFrame with *var_col* transformed to range
"""
# set *min_val* and *max_val* to minimal and maximal values observed in data
if min_val is None:
min_val = df[var_col].min()
if max_val is None:
max_val = df[var_col].max()
# compute the most appropriate step size for the histogram
step_size, decimals = _compute_step_size(min_val, max_val, bins)
min_val = min_val - (min_val % step_size)
# cut values into buckets
df[var_col] = pd.cut(df[var_col],
list(np.arange(min_val, max_val, step_size)) + [max_val],
include_lowest=True)
# convert buckets into strings
if decimals == 0:
df[var_col] = df[var_col].map(lambda x: f"{int(np.round(x.left))} - {int(np.round(x.right))}")
else:
df[var_col] = df[var_col].map(lambda x: f"{np.round(x.left, decimals)} - {np.round(x.right, decimals)}")
return df | 9c2844497dbe55727f6b2aea17cf7a23e60a3002 | 3,650,996 |
import itertools
import numpy as np
def get_pairs(labels):
"""
For the labels of a given word, creates all possible pairs
of labels that match sense
"""
result = []
unique = np.unique(labels)
for label in unique:
ulabels = np.where(labels==label)[0]
# handles when a word sense has only one occurrence
if len(ulabels) == 1:
# returns the instance paired with itself, so it can be counted
result.append((ulabels[0], ulabels[0]))
else:
for p in itertools.combinations(ulabels, 2):
result.append(p)
return result | 454de57eedf6f272fef2c15b40f84de57ed3fa64 | 3,650,997 |
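# Usage sketch: labels for five occurrences of one word; repeated senses yield index
# pairs, while the singleton sense (index 3) is paired with itself so it still counts.
assert get_pairs(np.array([0, 1, 0, 2, 1])) == [(0, 2), (1, 4), (3, 3)]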
def iredv(tvp,tton):
""" makes sop tvp irredundant relative to onset truth table"""
res = []
red = list(tvp)
for j in range(len(tvp)):
tvj=tvp[j]&tton #care part of cube j
if (tvj&~or_redx(red,j)) == m.const(0): # reduce jth cube to 0
red[j]=m.const(0)
else: #keep cube j
res = res + [tvp[j]]
return res | 5fdb9ed97216b668110908419b364107ed3b7c37 | 3,650,998 |
def ridder_fchp(st, target=0.02, tol=0.001, maxiter=30, maxfc=0.5, config=None):
"""Search for highpass corner using Ridder's method.
Search such that the criterion that the ratio between the maximum of a third order
polynomial fit to the displacement time series and the maximum of the displacement
timeseries is a target % within a tolerance.
    This algorithm searches between a low initial corner frequency and a maximum fc.
Method developed originally by Scott Brandenberg
Args:
st (StationStream):
Stream of data.
target (float):
target percentage for ratio between max polynomial value and max
displacement.
tol (float):
            tolerance for matching the ratio target
maxiter (float):
maximum number of allowed iterations in Ridder's method
maxfc (float):
Maximum allowable value of the highpass corner freq.
config (dict):
Configuration dictionary (or None). See get_config().
Returns:
StationStream.
"""
if not st.passed:
return st
if config is None:
config = get_config()
processing_steps = config["processing"]
ps_names = [list(ps.keys())[0] for ps in processing_steps]
ind = int(np.where(np.array(ps_names) == "highpass_filter")[0][0])
hp_args = processing_steps[ind]["highpass_filter"]
frequency_domain = hp_args["frequency_domain"]
if frequency_domain is True:
filter_code = 1
elif frequency_domain is False:
filter_code = 0
for tr in st:
initial_corners = tr.getParameter("corner_frequencies")
initial_f_hp = initial_corners["highpass"]
new_f_hp = get_fchp(
dt=tr.stats.delta,
acc=tr.data,
target=target,
tol=tol,
poly_order=FORDER,
maxiter=maxiter,
fchp_max=maxfc,
filter_type=filter_code,
)
# Method did not converge if new_f_hp reaches maxfc
        if (maxfc - new_f_hp) < 1e-9:
tr.fail("auto_fchp did not find an acceptable f_hp.")
continue
if new_f_hp > initial_f_hp:
tr.setParameter(
"corner_frequencies",
{
"type": "snr_polyfit",
"highpass": new_f_hp,
"lowpass": initial_corners["lowpass"],
},
)
return st | ee3198c443885fa9524d12c30aa277d8cd843d27 | 3,650,999 |