Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
22,300 | def attach(domain, filename):
count = 0
errors = 0
with open(filename) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=b';',
quotechar=b'"')
for row in reader:
try:
dataset = Dataset.objects.get(id=ObjectId(row['local']))
except:
log.warning('Unable to find dataset %s', row['local'])
errors += 1
continue
Dataset.objects(**{
'extras__harvest:domain': domain,
'extras__harvest:remote_id': row['remote']
}).update(**{
'unset__extras__harvest:domain': True,
'unset__extras__harvest:remote_id': True
})
dataset.extras['harvest:domain'] = domain
dataset.extras['harvest:remote_id'] = row['remote']
dataset.last_modified = datetime.now()
dataset.save()
count += 1
return AttachResult(count, errors) | Attach existing dataset to their harvest remote id before harvesting.
The expected csv file format is the following:
- a column with header "local" and the local IDs or slugs
- a column with header "remote" and the remote IDs
The delimiter should be ";". Column order
and extra columns do not matter. |
22,301 | def Guo_Sun(dp, voidage, vs, rho, mu, Dt, L=1):
Rem = dp*rho*vs/mu/(1-voidage)
fv = 180 + (9.5374*dp/Dt - 2.8054)*Rem**0.97
return fv*(mu*vs*L/dp**2)*(1-voidage)**2/voidage**3 | r'''Calculates pressure drop across a packed bed of spheres using a
correlation developed in [1]_. This is valid for highly-packed particles
at particle/tube diameter ratios between 2 and 3, where a ring packing
structure occurs. When the packing ratio is this low, it is important to use this
model because in some cases its predictions are as low as half those of
other models!
.. math::
f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)Re_{Erg}^{0.97}
.. math::
f_v = \frac{\Delta P d_p^2}{\mu v_s L}\frac{\epsilon^3}{(1-\epsilon)^2}
.. math::
Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)}
Parameters
----------
dp : float
Particle diameter of spheres [m]
voidage : float
Void fraction of bed packing [-]
vs : float
Superficial velocity of the fluid (volumetric flow rate/cross-sectional
area)[m/s]
rho : float
Density of the fluid [kg/m^3]
mu : float
Viscosity of the fluid, [Pa*s]
Dt : float
Diameter of the tube, [m]
L : float, optional
Length the fluid flows in the packed bed [m]
Returns
-------
dP : float
Pressure drop across the bed [Pa]
Notes
-----
Developed with data in the range of:
.. math::
100 < Re_{m} < 33000\\
2 < d_t/d_p < 3.1\\
0.476 < \epsilon < 0.492
Examples
--------
>>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3)
42019.529911473706
References
----------
.. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing Liu.
"Pressure Drop in Slender Packed Beds with Novel Packing Arrangement."
Powder Technology 321 (November 2017): 286-92.
doi:10.1016/j.powtec.2017.08.024. |
22,302 | def pop_context(self):
processor = getattr(self, , None)
if processor is not None:
pop_context = getattr(processor, , None)
if pop_context is None:
pop_context = getattr(processor, , None)
if pop_context is not None:
return pop_context()
if self._pop_next:
self._pop_next = False | Pops the last set of keyword arguments provided to the processor. |
22,303 | def plot_dict(self, flags, label='key', known='/', **kwargs):
out = []
for lab, flag in flags.items():
if label.lower() == 'name':
lab = flag.name
elif label.lower() != 'key':
lab = label
out.append(self.plot_flag(flag, label=to_string(lab), known=known,
**kwargs))
return out | Plot a `~gwpy.segments.DataQualityDict` onto these axes
Parameters
----------
flags : `~gwpy.segments.DataQualityDict`
data-quality dict to display
label : `str`, optional
labelling system to use, or fixed label for all `DataQualityFlags`.
Special values include
- ``'key'``: use the key of the `DataQualityDict`,
- ``'name'``: use the :attr:`~DataQualityFlag.name` of the
`DataQualityFlag`
If anything else, that fixed label will be used for all lines.
known : `str`, `dict`, `None`, default: '/'
display `known` segments with the given hatching, or give a
dict of keyword arguments to pass to
:meth:`~SegmentAxes.plot_segmentlist`, or `None` to hide.
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches |
22,304 | def Recurrent(step_model):
ops = step_model.ops
def recurrent_fwd(seqs, drop=0.0):
lengths = [len(X) for X in seqs]
X, size_at_t, unpad = ops.square_sequences(seqs)
Y = ops.allocate((X.shape[0], X.shape[1], step_model.nO))
cell_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
hidden_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
out_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
backprops = [None] * max(lengths)
state = step_model.weights.get_initial_state(len(seqs))
for t in range(max(lengths)):
state = list(state)
size = size_at_t[t]
Xt = X[t, :size]
state[0] = state[0][:size]
state[1] = state[1][:size]
if cell_drop is not None:
state[0] *= cell_drop
if hidden_drop is not None:
state[1] *= hidden_drop
inputs = (state, Xt)
(state, Y[t, :size]), backprops[t] = step_model.begin_update(inputs)
if out_drop is not None:
Y[t, :size] *= out_drop
outputs = unpad(Y)
def recurrent_bwd(d_outputs, sgd=None):
dY, size_at_t, unpad = step_model.ops.square_sequences(d_outputs)
d_state = [
step_model.ops.allocate((dY.shape[1], step_model.nO)),
step_model.ops.allocate((dY.shape[1], step_model.nO)),
]
updates = {}
def gather_updates(weights, gradient, key=None):
updates[key] = (weights, gradient)
dX = step_model.ops.allocate(
(dY.shape[0], dY.shape[1], step_model.weights.nI)
)
for t in range(max(lengths) - 1, -1, -1):
if out_drop is not None:
dY[t] *= out_drop
d_state_t, dXt = backprops[t]((d_state, dY[t]), sgd=gather_updates)
d_state[0][: d_state_t[0].shape[0]] = d_state_t[0]
d_state[1][: d_state_t[1].shape[0]] = d_state_t[1]
dX[t, : dXt.shape[0]] = dXt
if cell_drop is not None:
d_state[0] *= cell_drop
if hidden_drop is not None:
d_state[1] *= hidden_drop
d_cell, d_hidden = d_state
step_model.weights.d_initial_cells += d_cell.sum(axis=0)
step_model.weights.d_initial_hiddens += d_hidden.sum(axis=0)
if sgd is not None:
for key, (weights, gradient) in updates.items():
sgd(weights, gradient, key=key)
return unpad(dX)
return outputs, recurrent_bwd
model = wrap(recurrent_fwd, step_model)
model.nO = step_model.nO
return model | Apply a stepwise model over a sequence, maintaining state. For RNNs |
22,305 | def from_client_config(cls, client_config, scopes, **kwargs):
if 'web' in client_config:
client_type = 'web'
elif 'installed' in client_config:
client_type = 'installed'
else:
raise ValueError(
'Client secrets must be for a web or installed app.')
session, client_config = (
google_auth_oauthlib.helpers.session_from_client_config(
client_config, scopes, **kwargs))
redirect_uri = kwargs.get('redirect_uri', None)
return cls(session, client_type, client_config, redirect_uri) | Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
Raises:
ValueError: If the client configuration is not in the correct
format.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets |
22,306 | def lesser(lhs, rhs):
return _ufunc_helper(
lhs,
rhs,
op.broadcast_lesser,
lambda x, y: 1 if x < y else 0,
_internal._lesser_scalar,
_internal._greater_scalar) | Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are less than rhs,
otherwise return 0(false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x < 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x < y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.lesser(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (z < y).asnumpy()
array([[ 0., 0.],
[ 1., 0.]], dtype=float32) |
22,307 | def find_poor_default_arg(node):
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
) | Finds poor default args |
22,308 | def LSRS(self, params):
try:
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
except iarm.exceptions.ParsingError:
Rb, Rc = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
Ra = Rb
if self.is_register(Rc):
self.check_arguments(low_registers=(Ra, Rc))
self.match_first_two_parameters(Ra, Rb)
def LSRS_func():
if (self.register[Rc] > 0) and (self.register[Rb] & (1 << (self.register[Rc] - 1))):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
self.register[Ra] = self.register[Ra] >> self.register[Rc]
self.set_NZ_flags(self.register[Ra])
else:
self.check_arguments(low_registers=(Ra, Rb), imm5_counting=(Rc,))
shift_amount = self.check_immediate(Rc)
def LSRS_func():
if self.register[Rb] & (1 << (shift_amount - 1)):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
self.register[Ra] = self.register[Rb] >> shift_amount
self.set_NZ_flags(self.register[Ra])
return LSRS_func | LSRS [Ra,] Ra, Rc
LSRS [Ra,] Rb, #imm5_counting
Logical shift right Rb by Rc or imm5 and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb |
22,309 | def get_external_logger(name=None, short_name=" ", log_to_file=True):
global LOGGERS
loggername = name
logger = _check_existing_logger(loggername, short_name)
if logger is not None:
return logger
logging_config = LOGGING_CONFIG.get(name, LOGGING_CONFIG.get("external"))
filename = logging_config.get("file", {}).get("name", loggername)
if not filename.endswith(".log"):
filename = str(filename) + ".log"
logger = _get_basic_logger(loggername, log_to_file, get_base_logfilename(filename))
cbh = logging.StreamHandler()
cbh.formatter = BenchFormatterWithType(COLOR_ON)
if VERBOSE_LEVEL == 1 and not SILENT_ON:
cbh.setLevel(logging.INFO)
elif VERBOSE_LEVEL >= 2 and not SILENT_ON:
cbh.setLevel(logging.DEBUG)
elif SILENT_ON:
cbh.setLevel(logging.ERROR)
else:
cbh.setLevel(getattr(logging, logging_config.get("level")))
logger.addHandler(cbh)
LOGGERS[loggername] = BenchLoggerAdapter(logger, {"source": short_name})
return LOGGERS[loggername] | Get a logger for external modules, whose logging should usually be on a less verbose level.
:param name: Name for logger
:param short_name: Shorthand name for logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger |
22,310 | def LOS_PRMin(Ds, dus, kPOut=None, Eps=1.e-12, Test=True):
if Test:
assert Ds.ndim in [1,2] and 3 in Ds.shape and Ds.shape==dus.shape
assert kPOut is None or (Ds.ndim==1 and not hasattr(kPOut,'__iter__')) or (Ds.ndim==2 and kPOut.shape==(Ds.size/3,))
v = Ds.ndim==1
if v:
Ds = Ds.reshape((3,1))
dus = dus.reshape((3,1))
if kPOut is not None:
kPOut = np.array([kPOut])
kRMin = np.nan*np.ones((Ds.shape[1],))
uparN = np.sqrt(dus[0,:]**2 + dus[1,:]**2)
ind = uparN>Eps
kRMin[~ind] = 0.
kRMin[ind] = -(dus[0,ind]*Ds[0,ind]+dus[1,ind]*Ds[1,ind])/uparN[ind]**2
kRMin[kRMin<=0.] = 0.
if kPOut is not None:
kRMin[kRMin>kPOut] = kPOut[kRMin>kPOut]
if v:
kRMin = kRMin[0]
return kRMin | Compute the point on the LOS where the major radius is minimum |
22,311 | def _update_srcmap(self, name, src, **kwargs):
k = self._create_srcmap(name, src, **kwargs)
scale = self._src_expscale.get(name, 1.0)
k *= scale
self.like.logLike.sourceMap(str(name)).model()
self.like.logLike.setSourceMapImage(str(name), np.ravel(k))
self.like.logLike.sourceMap(str(name)).model()
normPar = self.like.normPar(name)
if not normPar.isFree():
self.like.logLike.buildFixedModelWts() | Update the source map for an existing source in memory. |
22,312 | def events(self):
if not self.__events:
self.__events = Events(self.__connection)
return self.__events | Gets the Events API client.
Returns:
Events: |
22,313 | def is_transition(self):
return self.is_snv and is_purine(self.ref) == is_purine(self.alt) | Is this variant a pyrimidine to pyrimidine change or a purine to purine change?
22,314 | def malloc(self, sim_size):
raise NotImplementedError("%s not implemented for %s" % (self.malloc.__func__.__name__,
self.__class__.__name__)) | A somewhat faithful implementation of libc `malloc`.
:param sim_size: the amount of memory (in bytes) to be allocated
:returns: the address of the allocation, or a NULL pointer if the allocation failed |
22,315 | def simple(self):
if self._days:
return % self.totaldays
elif self.months:
return % self._months
elif self.years:
return % self.years
else:
return | A string representation with only one period delimiter. |
22,316 | def webui_data_stores_saved_query_key(self, **kwargs):
config = ET.Element("config")
webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
data_stores = ET.SubElement(webui, "data-stores")
saved_query = ET.SubElement(data_stores, "saved-query")
key = ET.SubElement(saved_query, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
22,317 | def indicator(self, indicator_type, summary, **kwargs):
indicator_obj = Indicator(indicator_type, summary, **kwargs)
return self._indicator(indicator_obj) | Add Indicator data to Batch object.
Args:
indicator_type (str): The ThreatConnect define Indicator type.
summary (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Indicator. |
22,318 | def alert_stream(self, reset_event, kill_event):
_LOGGING.debug(, self.name, self.cam_id)
start_event = False
parse_string = ""
fail_count = 0
url = % self.root_url
while True:
try:
stream = self.hik_request.get(url, stream=True,
timeout=(CONNECT_TIMEOUT,
READ_TIMEOUT))
if stream.status_code == requests.codes.not_found:
url = % self.root_url
stream = self.hik_request.get(url, stream=True)
if stream.status_code != requests.codes.ok:
raise ValueError()
else:
_LOGGING.debug(, self.name)
fail_count = 0
self.watchdog.start()
for line in stream.iter_lines():
if line:
str_line = line.decode("utf-8", "ignore")
if str_line.find() != -1:
start_event = True
parse_string += str_line
elif str_line.find() != -1:
parse_string += str_line
start_event = False
if parse_string:
tree = ET.fromstring(parse_string)
self.process_stream(tree)
self.update_stale()
parse_string = ""
else:
if start_event:
parse_string += str_line
if kill_event.is_set():
break
elif reset_event.is_set():
raise ValueError()
if kill_event.is_set():
_LOGGING.debug(,
self.name)
self.watchdog.stop()
self.hik_request.close()
return
elif reset_event.is_set():
raise ValueError()
except (ValueError,
requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError) as err:
fail_count += 1
reset_event.clear()
_LOGGING.warning(,
self.name, fail_count, (fail_count * 5) + 5, err)
parse_string = ""
self.watchdog.stop()
self.hik_request.close()
time.sleep(5)
self.update_stale()
time.sleep(fail_count * 5)
continue | Open event stream. |
22,319 | def write_response(self, response):
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup() | Writes response content synchronously to the transport. |
22,320 | def RemoveBackground(EPIC, campaign=None):
if campaign is None:
campaign = Campaign(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s." % campaign)
if campaign < 3:
return True
else:
return False | Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not
to remove the background flux for the target. If ``campaign < 3``,
returns :py:obj:`True`, otherwise returns :py:obj:`False`. |
22,321 | def area(x,y):
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) | Calculate the area of a polygon given as x(...),y(...)
Implementation of Shoelace formula |
22,322 | def _post_clean(self):
super(NgModelFormMixin, self)._post_clean()
if self._errors and self.prefix:
self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items()) | Rewrite the error dictionary, so that its keys correspond to the model fields. |
22,323 | def constant_time_compare(val1, val2):
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0 | Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match. Do
not use this function for anything other than comparison with known
length targets.
This should be implemented in C in order to get it completely right. |
22,324 | def createCellsFixedNum (self):
from .. import sim
cells = []
self.rand.Random123(self.tags[], sim.net.lastGid, sim.cfg.seeds[])
self.rand.uniform(0, 1)
vec = h.Vector(self.tags[]*3)
vec.setrand(self.rand)
randLocs = np.array(vec).reshape(self.tags[], 3)
if sim.net.params.shape == :
rho = randLocs[:,0]
phi = 2 * pi * randLocs[:,2]
x = (1 + sqrt(rho) * cos(phi))/2.0
z = (1 + sqrt(rho) * sin(phi))/2.0
randLocs[:,0] = x
randLocs[:,2] = z
elif sim.net.params.shape == :
rho = np.power(randLocs[:,0], 1.0/3.0)
phi = 2 * pi * randLocs[:,1]
costheta = (2 * randLocs[:,2]) - 1
theta = arccos(costheta)
x = (1 + rho * cos(phi) * sin(theta))/2.0
y = (1 + rho * sin(phi) * sin(theta))/2.0
z = (1 + rho * cos(theta))/2.0
randLocs[:,0] = x
randLocs[:,1] = y
randLocs[:,2] = z
for icoord, coord in enumerate([, , ]):
if coord+ in self.tags:
self.tags[coord+] = [float(point) / getattr(sim.net.params, +coord.upper()) for point in self.tags[coord+]]
if coord+ in self.tags:
minv = self.tags[coord+][0]
maxv = self.tags[coord+][1]
randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
for i in self._distributeCells(int(sim.net.params.scale * self.tags[]))[sim.rank]:
gid = sim.net.lastGid+i
self.cellGids.append(gid)
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}
cellTags[] = self.tags[]
cellTags[] = randLocs[i,0]
cellTags[] = randLocs[i,1]
cellTags[] = randLocs[i,2]
cellTags[] = sim.net.params.sizeX * randLocs[i,0]
cellTags[] = sim.net.params.sizeY * randLocs[i,1]
cellTags[] = sim.net.params.sizeZ * randLocs[i,2]
if in self.tags:
if isinstance(self.tags[][0], list):
try:
cellTags[][] = self.tags[][i]
except:
pass
else:
cellTags[][] = self.tags[]
cells.append(self.cellModelClass(gid, cellTags))
if sim.cfg.verbose: print((%(i, sim.net.params.scale * self.tags[]-1, gid, self.tags[], sim.rank)))
sim.net.lastGid = sim.net.lastGid + self.tags[]
return cells | Create population cells based on fixed number of cells |
22,325 | def read_bit(self):
if not self.bitcount:
self.bits = ord(self.input.read(1))
self.bitcount = 8
result = (self.bits & 1) == 1
self.bits >>= 1
self.bitcount -= 1
return result | Read a single boolean value. |
22,326 | def pca_eig(x):
s, w = np.linalg.eigh(x.dot(x.T))
return w, s | Calculate PCA using eigenvalue decomposition.
Parameters
----------
x : ndarray, shape (channels, samples)
Two-dimensional input data.
Returns
-------
w : ndarray, shape (channels, channels)
Eigenvectors (principal components) (in columns).
s : ndarray, shape (channels,)
Eigenvalues. |
22,327 | def organization_fields(self, organization):
return self._query_zendesk(self.endpoint.organization_fields, 'organization_field', id=organization) | Retrieve the organization fields for this organization.
:param organization: Organization object or id |
22,328 | def iplot_state(quantum_state, method='city', figsize=None):
warnings.warn("iplot_state is deprecated, and will be removed in \
the 0.9 release. Use the iplot_state_ * functions \
instead.",
DeprecationWarning)
rho = _validate_input_state(quantum_state)
if method == "city":
iplot_state_city(rho, figsize=figsize)
elif method == "paulivec":
iplot_state_paulivec(rho, figsize=figsize)
elif method == "qsphere":
iplot_state_qsphere(rho, figsize=figsize)
elif method == "bloch":
iplot_bloch_multivector(rho, figsize=figsize)
elif method == "hinton":
iplot_state_hinton(rho, figsize=figsize)
else:
raise VisualizationError() | Plot the quantum state.
Args:
quantum_state (ndarray): statevector or density matrix
representation of a quantum state.
method (str): Plotting method to use.
figsize (tuple): Figure size in pixels.
Raises:
VisualizationError: if the input is not a statevector or density
matrix, or if the state is not an multi-qubit quantum state. |
22,329 | def anim(self, start=0, stop=None, fps=30):
figure = self.state or self.initialize_plot()
anim = animation.FuncAnimation(figure, self.update_frame,
frames=self.keys,
interval = 1000.0/fps)
if self._close_figures: plt.close(figure)
return anim | Method to return a matplotlib animation. The start and stop
frames may be specified as well as the fps. |
22,330 | def _get_policies(self):
username = self._get_username_for_key()
policies = self.client.list_user_policies(
UserName=username
)
return policies | Returns all the policy names for a given user |
22,331 | def get_language_pairs(self, train_langs=None):
if train_langs is None:
result = self.api_call()
else:
result = self.api_call(
.format(train_langs))
try:
langs_json = json.loads(result.content)
if in langs_json:
return []
languages = [LangPair(Language(
shortname=lang_json["lang_pair"]["source_language"][
"shortname"],
name=lang_json["lang_pair"]["source_language"]["name"]),
Language(shortname=lang_json["lang_pair"][
"target_language"]["shortname"],
name=lang_json["lang_pair"][
"target_language"]["name"])
) for lang_json in langs_json["objects"]]
except Exception, e:
log.exception("Error decoding get language pairs")
raise e
return languages | Returns the language pairs available on unbabel |
22,332 | def get_overlay(self):
overlay = {}
dcmlist = self.files_in_serie
for i in range(len(dcmlist)):
onefile = dcmlist[i]
logger.info("reading " % onefile)
data = self._read_file(onefile)
if len(overlay) == 0:
for i_overlay in range(0, 50):
try:
data2d = decode_overlay_slice(data, i_overlay)
shp2 = data2d.shape
overlay[i_overlay] = np.zeros([len(dcmlist), shp2[0],
shp2[1]], dtype=np.int8)
overlay[i_overlay][-i - 1, :, :] = data2d
except Exception:
pass
else:
for i_overlay in overlay.keys():
try:
data2d = decode_overlay_slice(data, i_overlay)
overlay[i_overlay][-i - 1, :, :] = data2d
except Exception:
logger.warning('problem with overlay number ' +
str(i_overlay))
return overlay | Function make 3D data from dicom file slices. There are usualy
more overlays in the data. |
22,333 | def climate_stats(self, startclim, endclim, type, **kwargs):
self._check_geo_param(kwargs)
kwargs['type'] = type
kwargs['startclim'] = startclim
kwargs['endclim'] = endclim
kwargs['token'] = self.token
return self._get_response('stations/climatology', kwargs) | r""" Returns a dictionary of aggregated yearly climate statistics (count, standard deviation,
average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series
for a specified range of time at user specified location. Users must specify at least one geographic search
parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc')
to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters.
Also see the metadata() function for station IDs.
Arguments:
----------
type: string, mandatory
Describes what statistical values will be returned. Can be one of the following values:
"avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med",
"count", or "all". "All" will return all of the statistics.
startclim: string, mandatory
Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC
e.g. startclim=06011800 Do not specify a year.
endclim: string, mandatory
End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC
e.g. endclim=06011800 Do not specify a year.
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'.
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of aggregated climatology statistics.
Raises:
-------
None. |
22,334 | def _validate_positional_arguments(args):
nouns = []
for arg in args:
if not arg.startswith('-') or not arg.startswith('--'):
nouns.append(arg)
else:
break
while nouns:
search = ' '.join(nouns)
if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False):
del nouns[-1]
else:
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args))) | To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assuming that unknown commands are positional arguments immediately
led by words that only appear at the end of the commands
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments. |
22,335 | def _redefines_import(node):
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False | Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise. |
22,336 | def get_computer_desc():
hostname_cmd = salt.utils.path.which('hostnamectl')
if hostname_cmd:
desc = __salt__['cmd.run'](
[hostname_cmd, 'status', '--pretty'],
python_shell=False
)
else:
desc = None
pattern = re.compile(r'^\s*PRETTY_HOSTNAME=(.*)$')
try:
with salt.utils.files.fopen('/etc/machine-info', 'r') as mach_info:
for line in mach_info.readlines():
line = salt.utils.stringutils.to_unicode(line)
match = pattern.match(line)
if match:
desc = _strip_quotes(match.group(1).strip())
except IOError:
pass
if desc is None:
return False
return desc.replace(r'\"', r'"').replace(r'\n', '\n').replace(r'\t', '\t') | Get PRETTY_HOSTNAME value stored in /etc/machine-info
If this file doesn't exist or the variable doesn't exist
return False.
:return: Value of PRETTY_HOSTNAME, or False if it does not exist.
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' system.get_computer_desc |
22,337 | def shift(self, top=None, right=None, bottom=None, left=None):
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
) | Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box. |
22,338 | def add_remote(name, location):
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' remote-add ' + name + ' ' + location)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret | Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.add_remote flathub https://flathub.org/repo/flathub.flatpakrepo |
22,339 | def apply_defaults(func):
sig_cache = signature(func)
non_optional_args = {
name for (name, param) in sig_cache.parameters.items()
if param.default == param.empty and
param.name != 'self' and
param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)}
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > 1:
raise AirflowException(
"Use keyword arguments when initializing operators")
dag_args = {}
dag_params = {}
dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG
if dag:
dag_args = copy(dag.default_args) or {}
dag_params = copy(dag.params) or {}
params = {}
if 'params' in kwargs:
params = kwargs['params']
dag_params.update(params)
default_args = {}
if 'default_args' in kwargs:
default_args = kwargs['default_args']
if 'params' in default_args:
dag_params.update(default_args['params'])
del default_args['params']
dag_args.update(default_args)
default_args = dag_args
for arg in sig_cache.parameters:
if arg not in kwargs and arg in default_args:
kwargs[arg] = default_args[arg]
missing_args = list(non_optional_args - set(kwargs))
if missing_args:
msg = "Argument {0} is required".format(missing_args)
raise AirflowException(msg)
kwargs['params'] = dag_params
result = func(*args, **kwargs)
return result
return wrapper | Function decorator that looks for an argument named "default_args", and
fills the unspecified arguments from it.
Since python2.* isn't clear about which arguments are missing when
calling a function, and that this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments. |
22,340 | def _set_typeattr(typeattr, existing_ta = None):
if existing_ta is None:
ta = TypeAttr(attr_id=typeattr.attr_id)
else:
ta = existing_ta
ta.unit_id = typeattr.unit_id
ta.type_id = typeattr.type_id
ta.data_type = typeattr.data_type
if hasattr(typeattr, 'default_dataset_id') and typeattr.default_dataset_id is not None:
ta.default_dataset_id = typeattr.default_dataset_id
ta.description = typeattr.description
ta.properties = typeattr.get_properties()
ta.attr_is_var = typeattr.is_var if typeattr.is_var is not None else 'N'
ta.data_restriction = _parse_data_restriction(typeattr.data_restriction)
if typeattr.dimension_id is None:
pass
else:
if typeattr.attr_id is not None and typeattr.attr_id > 0:
attr = ta.attr
if attr is not None and attr.dimension_id is not None and attr.dimension_id != typeattr.dimension_id or \
attr is not None and attr.dimension_id is not None:
raise HydraError("Cannot set a dimension on type attribute which "
"does not match its attribute. Create a new attribute if "
"you want to use attribute %s with dimension_id %s"%
(attr.name, typeattr.dimension_id))
elif typeattr.attr_id is None and typeattr.name is not None:
attr = _get_attr_by_name_and_dimension(typeattr.name, typeattr.dimension_id)
ta.attr_id = attr.id
ta.attr = attr
_check_dimension(ta)
if existing_ta is None:
log.debug("Adding ta to DB")
db.DBSession.add(ta)
return ta | Add or update a type attribute.
If an existing type attribute is provided, then update.
Checks are performed to ensure that the dimension provided on the
type attr (not updateable) is the same as that on the referring attribute.
The unit provided (stored on tattr) must conform to the dimension stored
on the referring attribute (stored on tattr).
This is done so that multiple templates can all use the same attribute,
but specify different units.
If no attr_id is provided, but an attr_name and dimension are provided,
then a new attribute can be created (or retrieved) and used. I.e., no
attribute ID must be specified if attr_name and dimension are specified.
***WARNING***
Setting attribute ID to null means a new type attribute (and even a new attr)
may be added; none are removed or replaced. To remove other type attrs, do it
manually using delete_typeattr |
22,341 | def load_data(handle, reader=None):
if not reader:
reader = os.path.splitext(handle)[1][1:].lower()
if reader not in _READERS:
raise NeuroMError( % reader)
filename = _get_file(handle)
try:
return _READERS[reader](filename)
except Exception as e:
L.exception(, filename, reader)
raise RawDataError( % (filename, str(e))) | Unpack data into a raw data wrapper |
22,342 | def genome_info(genome, info):
try:
scg = info[]
dups = info[]
length = info[]
return [scg - dups, length, genome]
except:
return [False, False, info[], genome] | return genome info for choosing representative
if ggKbase table provided - choose rep based on SCGs and genome length
- priority for most SCGs - extra SCGs, then largest genome
otherwise, based on largest genome |
22,343 | def get(self, sid):
return ModelBuildContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, ) | Constructs a ModelBuildContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext |
22,344 | def cull_to_timestep(self, timestep=1):
valid_s = self.header.analysis_period.VALIDTIMESTEPS.keys()
assert timestep in valid_s, \
.format(timestep, valid_s)
new_ap, new_values, new_datetimes = self._timestep_cull(timestep)
new_header = self.header.duplicate()
new_header._analysis_period = new_ap
new_coll = HourlyDiscontinuousCollection(
new_header, new_values, new_datetimes)
new_coll._validated_a_period = True
return new_coll | Get a collection with only datetimes that fit a timestep. |
22,345 | def max_dimension(cellmap, sheet = None):
cells = list(cellmap.values())
rows = 0
cols = 0
for cell in cells:
if sheet is None or cell.sheet == sheet:
rows = max(rows, int(cell.row))
cols = max(cols, int(col2num(cell.col)))
return (rows, cols) | This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tupple of two integers, the first being the rows and the second being the columns. |
22,346 | def bootstrap_standby_leader(self):
clone_source = self.get_remote_master()
msg = .format(clone_source.conn_url)
result = self.clone(clone_source, msg)
self._post_bootstrap_task.complete(result)
if result:
self.state_handler.set_role('standby_leader')
return result | If we found 'standby' key in the configuration, we need to bootstrap
not a real master, but a 'standby leader', that will take base backup
from a remote master and start follow it. |
22,347 | def bst(height=3, is_perfect=False):
_validate_tree_height(height)
if is_perfect:
return _generate_perfect_bst(height)
values = _generate_random_node_values(height)
leaf_count = _generate_random_leaf_count(height)
root = Node(values.pop(0))
leaves = set()
for value in values:
node = root
depth = 0
inserted = False
while depth < height and not inserted:
attr = 'left' if node.value > value else 'right'
if getattr(node, attr) is None:
setattr(node, attr, Node(value))
inserted = True
node = getattr(node, attr)
depth += 1
if inserted and depth == height:
leaves.add(node)
if len(leaves) == leaf_count:
break
return root | Generate a random BST (binary search tree) and return its root node.
:param height: Height of the BST (default: 3, range: 0 - 9 inclusive).
:type height: int
:param is_perfect: If set to True (default: False), a perfect BST with all
levels filled is returned. If set to False, a perfect BST may still be
generated by chance.
:type is_perfect: bool
:return: Root node of the BST.
:rtype: binarytree.Node
:raise binarytree.exceptions.TreeHeightError: If height is invalid.
**Example**:
.. doctest::
>>> from binarytree import bst
>>>
>>> root = bst()
>>>
>>> root.height
3
>>> root.is_bst
True
.. doctest::
>>> from binarytree import bst
>>>
>>> root = bst(10) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TreeHeightError: height must be an int between 0 - 9 |
22,348 | def head(self):
if self._head is None:
self._head = self.sentence.tokens[self._head_id-1]
return self._head | The token serving as the "head" of the mention
:getter: the token corresponding to the head
:type: corenlp_xml.document.Token |
22,349 | def _get(self, uri):
uri = "%s?showRecords=false&showSubdomains=false" % uri
resp, body = self._retry_get(uri)
body["records"] = []
return self.resource_class(self, body, loaded=True) | Handles the communication with the API when getting
a specific resource managed by this class.
Because DNS returns a different format for the body,
the BaseManager method must be overridden here. |
22,350 | def evaluate_inline(self, groups):
if self.lines:
if (
self.group_comments and
self.line_num == self.prev_line + 1 and
groups[] == self.leading
):
self.line_comments[-1][0] += + groups[][2:].replace(, )
else:
self.line_comments.append(
[groups[][2:].replace(, ), self.line_num, self.current_encoding]
)
self.leading = groups[]
self.prev_line = self.line_num | Evaluate inline comments on their own lines. |
22,351 | def pass_q_v1(self):
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.q[0] += der.qfactor*flu.q | Update the outlet link sequence.
Required derived parameter:
|QFactor|
Required flux sequences:
|lland_fluxes.Q|
Calculated flux sequence:
|lland_outlets.Q|
Basic equation:
:math:`Q_{outlets} = QFactor \\cdot Q_{fluxes}` |
22,352 | def target_to_list(target):
new_list = target_to_ipv4(target)
if not new_list:
new_list = target_to_ipv6(target)
if not new_list:
new_list = target_to_ipv4_cidr(target)
if not new_list:
new_list = target_to_ipv6_cidr(target)
if not new_list:
new_list = target_to_ipv4_short(target)
if not new_list:
new_list = target_to_ipv4_long(target)
if not new_list:
new_list = target_to_ipv6_short(target)
if not new_list:
new_list = target_to_ipv6_long(target)
if not new_list:
new_list = target_to_hostname(target)
return new_list | Attempt to return a list of single hosts from a target string. |
22,353 | def migrate_flow_collection(apps, schema_editor):
Process = apps.get_model('flow', 'Process')
DescriptorSchema = apps.get_model('flow', 'DescriptorSchema')
for process in Process.objects.all():
process.entity_type = process.flow_collection
process.entity_descriptor_schema = process.flow_collection
if (process.entity_descriptor_schema is not None and
not DescriptorSchema.objects.filter(slug=process.entity_descriptor_schema).exists()):
raise LookupError(
"Descriptow schema referenced in not "
"found.".format(process.entity_descriptor_schema)
)
process.save() | Migrate 'flow_collection' field to 'entity_type'. |
22,354 | def _init_polling(self):
with self.lock:
if not self.running:
return
r = random.Random()
delay = r.random() * self.refresh_interval
self.channel.io_loop.call_later(
delay=delay, callback=self._delayed_polling)
self.logger.info(
'Delaying throttling credit polling by %d sec', delay)
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll. |
22,355 | def sendFinalResponse(self):
self.requestProtocol.requestResponse["code"] = (
self.responseCode
)
self.requestProtocol.requestResponse["content"] = (
self.responseContent
)
self.requestProtocol.requestResponse["errors"] = (
self.responseErrors
)
self.requestProtocol.sendFinalRequestResponse() | Send the final response and close the connection.
:return: <void> |
22,356 | def dumps(xs, model=None, properties=False, indent=True, **kwargs):
xs = list(xs)
if not xs:
return
given_class = xs[0].__class__
if model is None:
model = xs[0].__class__
if not hasattr(model, 'to_triples'):
raise TypeError(
.format(model.__name__)
)
if given_class.__name__ in (, ):
xs = [model.from_xmrs(x, **kwargs) for x in xs]
elif given_class.__name__ == and model.__name__ != :
raise ValueError()
codec = XMRSCodec()
graphs = [
codec.triples_to_graph(model.to_triples(x, properties=properties))
for x in xs
]
if in kwargs:
indent = kwargs[]
return penman.dumps(graphs, cls=XMRSCodec, indent=indent) | Serialize Xmrs (or subclass) objects to PENMAN notation
Args:
xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
serialize
model: Xmrs subclass used to get triples
properties: if `True`, encode variable properties
indent: if `True`, adaptively indent; if `False` or `None`,
don't indent; if a non-negative integer N, indent N spaces
per level
Returns:
the PENMAN serialization of *xs* |
22,357 | def bump(self, bump_reqs=None, **kwargs):
bumps = {}
for existing_req in sorted(self.requirements(), key=lambda r: r.project_name):
if bump_reqs and existing_req.project_name not in bump_reqs:
continue
bump_reqs.check(existing_req)
try:
bump = self._bump(existing_req, bump_reqs.get(existing_req.project_name))
if bump:
bumps[bump.name] = bump
bump_reqs.check(bump)
except Exception as e:
if bump_reqs and bump_reqs.get(existing_req.project_name) and all(
r.required_by is None for r in bump_reqs.get(existing_req.project_name)):
raise
else:
log.warn(e)
for reqs in bump_reqs.required_requirements().values():
name = reqs[0].project_name
if name not in bumps and self.should_add(name):
try:
bump = self._bump(None, reqs)
if bump:
bumps[bump.name] = bump
bump_reqs.check(bump)
except Exception as e:
if all(r.required_by is None for r in reqs):
raise
else:
log.warn(e)
self.bumps.update(bumps.values())
return bumps.values() | Bump dependencies using given requirements.
:param RequirementsManager bump_reqs: Bump requirements manager
:param dict kwargs: Additional args from argparse. Some bumpers accept user options, and some not.
:return: List of :class:`Bump` changes made. |
22,358 | def check_bounds(args, lowerLimit, upperLimit):
penalty = 0
bound_hit = False
for i in range(0, len(args)):
if args[i] < lowerLimit[i] or args[i] > upperLimit[i]:
penalty = 10**15
bound_hit = True
return penalty, bound_hit | checks whether the parameter vector has left its bound, if so, adds a big number |
22,359 | def getGDEFGlyphClasses(feaLib):
for st in feaLib.statements:
if isinstance(st, ast.TableBlock) and st.name == "GDEF":
for st in st.statements:
if isinstance(st, ast.GlyphClassDefStatement):
return _GDEFGlyphClasses(
frozenset(st.baseGlyphs.glyphSet())
if st.baseGlyphs is not None
else frozenset(),
frozenset(st.ligatureGlyphs.glyphSet())
if st.ligatureGlyphs is not None
else frozenset(),
frozenset(st.markGlyphs.glyphSet())
if st.markGlyphs is not None
else frozenset(),
frozenset(st.componentGlyphs.glyphSet())
if st.componentGlyphs is not None
else frozenset(),
)
return _GDEFGlyphClasses(None, None, None, None) | Return GDEF GlyphClassDef base/mark/ligature/component glyphs, or
None if no GDEF table is defined in the feature file. |
22,360 | def error_lineno(self):
if isinstance(self.docstring, Docstring):
return self.docstring.start
return self.start | Get the line number with which to report violations. |
22,361 | def _init_client():
if client is not None:
return
global _mysql_kwargs, _table_name
_mysql_kwargs = {
'host': __opts__.get('mysql.host', '127.0.0.1'),
'user': __opts__.get('mysql.user', None),
'passwd': __opts__.get('mysql.password', None),
'db': __opts__.get('mysql.database', _DEFAULT_DATABASE_NAME),
'port': __opts__.get('mysql.port', 3306),
'unix_socket': __opts__.get('mysql.unix_socket', None),
'connect_timeout': __opts__.get('mysql.connect_timeout', None),
'autocommit': True,
}
_table_name = __opts__.get('mysql.table_name', _table_name)
for k, v in _mysql_kwargs.items():
if v is None:
_mysql_kwargs.pop(k)
kwargs_copy = _mysql_kwargs.copy()
kwargs_copy['passwd'] = "<hidden>"
log.info("mysql_cache: Setting up client with params: %r", kwargs_copy)
_create_table() | Initialize connection and create table if needed |
22,362 | def ic45(msg):
d = hex2bin(data(msg))
if d[9] == '0':
return None
ic = bin2int(d[10:12])
return ic | Icing.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: Icing level. 0=NIL, 1=Light, 2=Moderate, 3=Severe |
22,363 | def convert_1x_args(bucket, **kwargs):
host = kwargs.pop('host', 'localhost')
port = kwargs.pop('port', None)
if 'connection_string' not in kwargs and 'connstr' not in kwargs:
kwargs['connection_string'] = _build_connstr(host, port, bucket)
return kwargs | Converts arguments for 1.x constructors to their 2.x forms |
22,364 | def run(self):
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.popleft()
except IndexError:
self.unassigned_tasks.append((task_id, task))
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
elif msg_type == REQUEST:
address = msg[0]
try:
task_id, task = self.unassigned_tasks.popleft()
except IndexError:
self.available_workers.append(address)
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
elif msg_type == STATUS_REQ:
self.pruneAssignedTasks()
address = msg[0]
task_id = msg[2]
if any(task_id in x for x in self.assigned_tasks.values()):
status = STATUS_GIVEN
elif task_id in (x[0] for x in self.unassigned_tasks):
status = STATUS_HERE
else:
status = STATUS_NONE
self.task_socket.send_multipart([
address, STATUS_ANS, task_id, status
])
elif msg_type == STATUS_DONE:
address = msg[0]
task_id = msg[2]
try:
self.assigned_tasks[address].discard(task_id)
except KeyError:
pass
elif msg_type == STATUS_UPDATE:
address = msg[0]
try:
tasks_ids = pickle.loads(msg[2])
except:
self.logger.error("Could not unpickle status update message.")
else:
self.assigned_tasks[address] = tasks_ids
self.status_times[address] = time.time()
elif msg_type == REPLY:
self.logger.debug("Relaying")
destination = msg[-1]
origin = msg[0]
self.task_socket.send_multipart([destination] + msg[1:] + [origin])
elif msg_type == VARIABLE:
address = msg[4]
value = msg[3]
key = msg[2]
self.shared_variables[address].update(
{key: value},
)
self.info_socket.send_multipart([VARIABLE,
key,
value,
address])
elif msg_type == INIT:
address = msg[0]
try:
self.processConfig(pickle.loads(msg[2]))
except pickle.PickleError:
continue
self.task_socket.send_multipart([
address,
pickle.dumps(self.config,
pickle.HIGHEST_PROTOCOL),
pickle.dumps(self.shared_variables,
pickle.HIGHEST_PROTOCOL),
])
self.task_socket.send_multipart([
address,
pickle.dumps(self.cluster_available,
pickle.HIGHEST_PROTOCOL),
])
elif msg_type == CONNECT:
try:
connect_brokers = pickle.loads(msg[2])
except pickle.PickleError:
self.logger.error("Could not understand CONNECT message.")
continue
self.logger.info("Connecting to other brokers...")
self.addBrokerList(connect_brokers)
elif msg_type == SHUTDOWN:
self.logger.debug("SHUTDOWN command received.")
self.shutdown()
break | Redirects messages until a shutdown message is received. |
22,365 | def _controller_name(self, objtype):
if objtype.endswith():
return objtype[:-1] +
if objtype[-1] in or objtype[-2:] in [, ]:
return objtype +
if objtype.endswith():
return objtype[:-2] +
return objtype + | Determines the controller name for the object's type
Args:
objtype (str): The object type
Returns:
A string with the controller name |
22,366 | def get_context(self, filename):
if self.type == :
context = .format(RE_SLIDE.search(filename).group(1))
elif self.type == :
context = .format(RE_DOCS.match(filename).group(1))
else:
context =
return context | Get context. |
22,367 | def get_auth_token(self):
data = [str(self.id), hash_data(self.password)]
return _security.remember_token_serializer.dumps(data) | Returns the user's authentication token. |
22,368 | def __get_precipfc_data(latitude, longitude):
url =
url = url.format(
round(latitude, 2),
round(longitude, 2)
)
result = __get_url(url)
return result | Get buienradar forecasted precipitation. |
22,369 | async def send_pending_messages(self):
if not self.running:
await self.open()
try:
pending = self._handler._pending_messages[:]
await self._handler.wait_async()
results = []
for m in pending:
if m.state == constants.MessageState.SendFailed:
results.append((False, MessageSendFailed(m._response)))
else:
results.append((True, None))
return results
except Exception as e:
raise MessageSendFailed(e) | Wait until all pending messages have been sent.
:returns: A list of the send results of all the pending messages. Each
send result is a tuple with two values. The first is a boolean, indicating `True`
if the message sent, or `False` if it failed. The second is an error if the message
failed, otherwise it will be `None`.
:rtype: list[tuple[bool, ~azure.servicebus.common.errors.MessageSendFailed]]
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START queue_sender_messages]
:end-before: [END queue_sender_messages]
:language: python
:dedent: 4
:caption: Schedule messages. |
22,370 | def size(self, size):
clone = self._clone()
clone._size = size
return clone | Set the query size of this QuerySet should execute its query against. |
22,371 | def get_function(self, name: str) -> AbiFunction or None:
for func in self.functions:
if func['name'] == name:
return AbiFunction(func['name'], func['parameters'], func.get('returntype', ''))
return None | This interface is used to get an AbiFunction object from AbiInfo object by given function name.
:param name: the function name in abi file
:return: if succeed, an AbiFunction will constructed based on given function name |
22,372 | def view(self, dtype=None):
return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self) | Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8 |
22,373 | def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True):
if username:
hostname = "%s@%s" % (username, hostname)
try:
conn = remoto.Connection(
hostname,
logger=logger,
threads=threads,
detect_sudo=detect_sudo,
)
conn.global_timeout = 300
logger.debug("connected to host: %s " % hostname)
return conn
except Exception as error:
msg = "connecting to host: %s " % hostname
errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
raise RuntimeError(msg + errors) | A very simple helper, meant to return a connection
that will know about the need to use sudo. |
22,374 | def js_query(self, query: str) -> Awaitable:
if self.connected:
self.js_exec(query, self.__reqid)
fut = Future()
self.__tasks[self.__reqid] = fut
self.__reqid += 1
return fut
f = Future()
f.set_result(None)
return f | Send query to related DOM on browser.
:param str query: single string which indicates query type. |
22,375 | def handle_error(result):
if result:
return
_, error_string = get_error()
if not isinstance(error_string, str_cls):
error_string = _try_decode(error_string)
raise OSError(error_string) | Extracts the last Windows error message into a python unicode string
:param result:
A function result, 0 or None indicates failure
:return:
A unicode string error message |
22,376 | def iter_result(self, timeout=30, pagesize=100, no_hscan=False):
if not self._filters and not self._order_by:
if self._model._columns[self._model._pkey]._index:
return self._iter_all_pkey()
conn = _connect(self._model)
version = list(map(int, conn.info()['redis_version'].split('.')[:2]))
if version >= [2,8] and not no_hscan:
return self._iter_all_hscan()
return self._iter_all()
return self._iter_results(timeout, pagesize) | Iterate over the results of your query instead of getting them all with
`.all()`. Will only perform a single query. If you expect that your
processing will take more than 30 seconds to process 100 items, you
should pass `timeout` and `pagesize` to reflect an appropriate timeout
and page size to fetch at once.
Usage::
for user in User.query.endswith(email='@gmail.com').iter_result():
# do something with user
... |
22,377 | def create_dataset(group, name, data, units='', datatype=DataTypes.UNDEFINED,
chunks=True, maxshape=None, compression=None,
**attributes):
from numpy import asarray
srate = attributes.get('sampling_rate', None)
if not hasattr(data, 'dtype'):
data = asarray(data)
if data.dtype.kind in ('S', 'O', 'U'):
raise ValueError(
"data must be in array with numeric or compound type")
if data.dtype.kind == 'V':
if 'start' not in data.dtype.names:
raise ValueError("complex event data requires 'start' field")
if not isinstance(units, (list, tuple)):
raise ValueError("complex event data requires sequence of units")
if not len(units) == len(data.dtype.names):
raise ValueError("number of units doesn't match number of fields")
elif units in ('', 'samples') and not srate:
raise ValueError("unitless or 'samples' data requires a sampling_rate attribute")
# NB: can't really catch the case where sampled data has units but no sampling rate
dset = group.create_dataset(
name, data=data, maxshape=maxshape, chunks=chunks, compression=compression)
set_attributes(dset, units=units, datatype=datatype, **attributes)
return dset | Create an ARF dataset under group, setting required attributes
Required arguments:
name -- the name of dataset in which to store the data
data -- the data to store
Data can be of the following types:
* sampled data: an N-D numerical array of measurements
* "simple" event data: a 1-D array of times
* "complex" event data: a 1-D array of records, with field 'start' required
Optional arguments:
datatype -- a code defining the nature of the data in the channel
units -- channel units (optional for sampled data, otherwise required)
sampling_rate -- required for sampled data and event data with units=='samples'
Arguments passed to h5py:
maxshape -- make the node resizable up to this shape. Use None for axes that
need to be unlimited.
chunks -- specify the chunk size. The optimal chunk size depends on the
intended use of the data. For single-channel sampled data the
auto-chunking (True) is probably best.
compression -- compression strategy. Can be 'gzip', 'szip', 'lzf' or an integer
in range(10) specifying gzip(N). Only gzip is really portable.
Additional arguments are set as attributes on the created dataset
Returns the created dataset |
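A short sketch of creating a sampled dataset with this helper, assuming an open h5py file and that DataTypes comes from the same module; the file and dataset names are placeholders.

import h5py
import numpy as np

with h5py.File('example.arf', 'w') as fp:       # placeholder file name
    entry = fp.create_group('entry_00001')
    signal = np.zeros(20000)                    # stand-in for a recorded signal
    create_dataset(entry, 'pcm_000', signal,
                   units='', datatype=DataTypes.UNDEFINED,
                   sampling_rate=20000)         # required for unitless sampled data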
22,378 | def line(self, x0, y0, x1, y1, char):
if x0 > x1:
x1, x0 = x0, x1
y1, y0 = y0, y1
dx = x1 - x0
dy = y1 - y0
if dx == 0 and dy == 0:
self.point(x0, y0, char)
elif abs(dx) >= abs(dy):
for x in range(x0, x1 + 1):
if dx == 0:
y = y0
else:
y = y0 + int(round((x - x0) * dy / float((dx))))
self.point(x, y, char)
elif y0 < y1:
for y in range(y0, y1 + 1):
if dy == 0:
x = x0
else:
x = x0 + int(round((y - y0) * dx / float((dy))))
self.point(x, y, char)
else:
for y in range(y1, y0 + 1):
if dy == 0:
x = x0
else:
x = x1 + int(round((y - y1) * dx / float((dy))))
self.point(x, y, char) | Create a line on ASCII canvas.
Args:
x0 (int): x coordinate where the line should start.
y0 (int): y coordinate where the line should start.
x1 (int): x coordinate where the line should end.
y1 (int): y coordinate where the line should end.
char (str): character to draw the line with. |
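A usage sketch under the assumption that the surrounding class is an ASCII canvas with a point() method and some way to render itself; the AsciiCanvas constructor and draw() call below are hypothetical.

canvas = AsciiCanvas(cols=20, lines=10)   # hypothetical constructor
canvas.line(0, 0, 19, 9, '*')             # roughly diagonal line
canvas.line(0, 9, 19, 9, '-')             # horizontal line along the bottom row
print(canvas.draw())                      # hypothetical render call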
22,379 | def draw(self, painter, option, rect):
painter.save()
rect = self.rect()
left, top, right, bottom = self.contentsMargins()
x = rect.x() + left
y = rect.y() + top
w = rect.width() - (left + right)
h = rect.height() - (top + bottom)
r = self.roundingRadius()
l_pinch, t_pinch, r_pinch, b_pinch = self.pinch()
painter.setRenderHint(painter.Antialiasing)
painter.setRenderHint(painter.TextAntialiasing)
painter.save()
if self.isEnabled():
pen = QPen(self.borderColor())
else:
pen = QPen(self.disabledBorderColor())
pen.setWidthF(0.8)
painter.setRenderHint(painter.Antialiasing)
painter.setPen(pen)
self.drawStyle(painter, x, y, w, h, r)
painter.restore()
if self.style() == XNode.NodeStyle.Pixmap:
painter.restore()
return
icon = self.icon()
if icon and not icon.isNull():
pixmap = icon.pixmap(self.iconSize())
offset = (h - self.iconSize().height()) / 2
painter.drawPixmap(x + 4, offset, pixmap)
x += self.iconSize().width() + 4
w -= self.iconSize().width() + 4
x += 6
w -= 12
metrics = QFontMetrics(self.titleFont())
if not self.wordWrap():
e_text = metrics.elidedText(nativestring(self.displayName()),
Qt.ElideRight,
w)
else:
e_text = self.displayName()
painter.setFont(self.titleFont())
painter.drawText(x,
y,
w,
h,
Qt.AlignCenter | Qt.TextWordWrap,
e_text)
painter.restore() | Draws the node for the graphics scene. this method can and should \
be overloaded to create custom nodes.
:param painter <QPainter>
:param option <QGraphicsItemSytleOption>
:param rect <QRectF> |
22,380 | def is_met(self, ti, session, dep_context=None):
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) | Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext |
22,381 | def add_object_file(self, obj_file):
if isinstance(obj_file, str):
obj_file = object_file.ObjectFileRef.from_path(obj_file)
ffi.lib.LLVMPY_MCJITAddObjectFile(self, obj_file) | Add object file to the jit. object_file can be instance of
:class:ObjectFile or a string representing file system path |
22,382 | def tempdir(cls, suffix='', prefix=None, dir=None):
if prefix is None:
prefix = tempfile.template
if dir is not None:
dir = str(Path(dir))
dirname = tempfile.mkdtemp(suffix, prefix, dir)
return cls(dirname).absolute() | Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it. |
22,383 | def nn(self, x, k = 1, radius = np.inf, eps = 0.0, p = 2):
assert len(x) == self.dim, 'dimension of input {} does not match expected dimension {}'.format(len(x), self.dim)
k_x = min(k, self.size)
return self._nn(np.array(x), k_x, radius = radius, eps = eps, p = p) | Find the k nearest neighbors of x in the observed input data
:arg x: center
:arg k: the number of nearest neighbors to return (default: 1)
:arg eps: approximate nearest neighbors.
the k-th returned value is guaranteed to be no further than
(1 + eps) times the distance to the real k-th nearest neighbor.
:arg p: Which Minkowski p-norm to use. (default: 2, euclidean)
:arg radius: the maximum radius (default: +inf)
:return: distance and indexes of found nearest neighbors. |
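A hedged usage sketch; it assumes the dataset object was already populated with observed inputs through an add/append-style API that is not shown above.

# Nearest neighbour of a 2-D query point.
dists, idxs = dataset.nn([0.5, 0.5], k=1)
# Up to 5 neighbours within a radius, allowing approximate search.
dists, idxs = dataset.nn([0.5, 0.5], k=5, radius=0.2, eps=0.05)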
22,384 | def fetch_userid(self, side):
for user in self.users:
obj = self.users[user]
if obj.side == side:
return user | Return the userid for the specified bed side. |
22,385 | def ICM(input_dim, num_outputs, kernel, W_rank=1, W=None, kappa=None, name='ICM'):
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernelB'),name=name)
return K | Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number tuples of the corregionalization parameters 'W'
:type W_rank: integer |
22,386 | def _declare_namespace(self, package_name):
self._original_functions['declare_namespace'](package_name)  # attribute name assumed; original receiver not recoverable
mod = sys.modules[package_name]
mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) | Mock for #pkg_resources.declare_namespace() which calls
#pkgutil.extend_path() afterwards as the original implementation doesn't
seem to properly find all available namespace paths. |
22,387 | def pad(cls, sequences, padding, pad_len=None):
max_len = max([len(s) for s in sequences])
pad_len = pad_len or max_len
assert pad_len >= max_len, 'pad_len {} must be >= the longest sequence length {}'.format(pad_len, max_len)
for i, s in enumerate(sequences):
sequences[i] = [padding] * (pad_len - len(s)) + s
return np.array(sequences) | Pads a list of sequences such that they form a matrix.
:param sequences: a list of sequences of varying lengths.
:param padding: the value of padded cells.
:param pad_len: the length of the maximum padded sequence. |
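For illustration, left-padding three sequences to the longest length; SequenceHelper is a stand-in for whatever class defines this classmethod.

seqs = [[1, 2, 3], [4, 5], [6]]
padded = SequenceHelper.pad(seqs, padding=0)
# padded == array([[1, 2, 3],
#                  [0, 4, 5],
#                  [0, 0, 6]])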
22,388 | def sync(self, resync=False):
if resync:
self._clear()
while True:
logger.debug('Starting reminder sync: %s', self._reminder_version)
changes = self._reminders_api.list()
if 'task' in changes:
self._parseTasks(changes['task'])
self._reminder_version = changes['storageVersion']
logger.debug('Finishing reminder sync: %s', self._reminder_version)
history = self._reminders_api.history(self._reminder_version)
if self._reminder_version == history['highestStorageVersion']:
break
while True:
logger.debug('Starting keep sync: %s', self._keep_version)
labels_updated = any((i.dirty for i in self._labels.values()))
changes = self._keep_api.changes(
target_version=self._keep_version,
nodes=[i.save() for i in self._findDirtyNodes()],
labels=[i.save() for i in self._labels.values()] if labels_updated else None,
)
if changes.get('forceFullResync'):
raise exception.ResyncRequiredException('Full resync required')
if changes.get('upgradeRecommended'):
raise exception.UpgradeRecommendedException('Upgrade recommended')
if 'userInfo' in changes:
self._parseUserInfo(changes['userInfo'])
if 'nodes' in changes:
self._parseNodes(changes['nodes'])
self._keep_version = changes['toVersion']
logger.debug('Finishing keep sync: %s', self._keep_version)
if not changes['truncated']:
break
break
if _node.DEBUG:
self._clean() | Sync the local Keep tree with the server. If resyncing, local changes will be destroyed. Otherwise, local changes to notes, labels and reminders will be detected and synced up.
Args:
resync (bool): Whether to resync data.
Raises:
SyncException: If there is a consistency issue. |
22,389 | def get_subgraphs_as_molecules(self, use_weights=False):
if getattr(self, '_supercell_sg', None) is None:
self._supercell_sg = supercell_sg = self*(3,3,3)
supercell_sg.graph = nx.Graph(supercell_sg.graph)
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules | Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure |
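A hedged pymatgen sketch; the CIF path is a placeholder, and the StructureGraph constructor name may differ between pymatgen versions.

from pymatgen.core import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN

structure = Structure.from_file('molecular_crystal.cif')         # placeholder path
sg = StructureGraph.with_local_env_strategy(structure, JmolNN())
for mol in sg.get_subgraphs_as_molecules():
    print(mol.composition.reduced_formula)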
22,390 | def get_extra_pids(self):
while not self.is_retired:
for proc in psutil.process_iter():
for conn in proc.connections():
if conn.laddr.port == self.port:
self.logger.debug(f'Found a process listening on port {self.port} (pid {proc.pid})')
return [proc.pid]
if self.is_executable_configured():
return []
time.sleep(1)
if self.java_executable_path is None:
self.logger.info(
f"Can't auto-start java because {self.java_executable_path} is not found. "
"Please start java manually!") | Gets the list of process ids that should be marked as high priority.
:return: A list of process ids that are used by this bot in addition to the ones inside the python process. |
22,391 | def _group_by(data, criteria):
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res | Group objects in data using a function or a key |
22,392 | def mmi_to_raster(self, force_flag=False, algorithm=USE_ASCII):
LOGGER.debug('mmi_to_raster requested.')
if algorithm is None:
algorithm = USE_ASCII
if self.algorithm_name:
tif_path = os.path.join(
self.output_dir, '%s-%s.tif' % (
self.output_basename, algorithm))
else:
tif_path = os.path.join(
self.output_dir, '%s.tif' % self.output_basename)
if os.path.exists(tif_path) and force_flag is not True:
return tif_path
if algorithm == USE_ASCII:
ascii_path = self.mmi_to_ascii(True)
# Command template reconstructed; the exact gdal_translate flags used originally were not recoverable.
command = (
('%(gdal_translate)s -of GTiff -a_srs EPSG:4326 %(ascii)s %(tif)s') % {
'gdal_translate': which('gdal_translate')[0],
'ascii': ascii_path,
'tif': tif_path
}
)
LOGGER.info('Running command: %s' % command)
self._run_command(command)
else:
vrt_path = self.mmi_to_vrt(force_flag)
if INVDIST in algorithm:
algorithm = 'invdist:power=2.0:smoothing=1.0'
# Command template reconstructed from the gdal_grid example in the docstring.
command = (
('%(gdal_grid)s -zfield "mmi" -a %(alg)s -txe %(xMin)s %(xMax)s '
'-tye %(yMin)s %(yMax)s -outsize %(dimX)i %(dimY)i -of GTiff '
'-ot Float16 -l mmi %(vrt)s %(tif)s') % {
'gdal_grid': which('gdal_grid')[0],
'alg': algorithm,
'xMin': self.x_minimum,
'xMax': self.x_maximum,
'yMin': self.y_minimum,
'yMax': self.y_maximum,
'dimX': self.columns,
'dimY': self.rows,
'vrt': vrt_path,
'tif': tif_path
}
)
LOGGER.info('Running command: %s' % command)
self._run_command(command)
if INVDIST in algorithm:
algorithm = INVDIST
self.create_keyword_file(algorithm)
if self.algorithm_name:
qml_path = os.path.join(
self.output_dir, '%s-%s.qml' % (
self.output_basename, algorithm))
else:
qml_path = os.path.join(
self.output_dir, '%s.qml' % self.output_basename)
qml_source_path = resources_path('converter_data', 'mmi.qml')  # resource path assumed
shutil.copyfile(qml_source_path, qml_path)
return tif_path | Convert the grid.xml's mmi column to a raster using gdal_grid.
A geotiff file will be created.
Unfortunately no python bindings exist for doing this so we are
going to do it using a shell call.
.. see also:: http://www.gdal.org/gdal_grid.html
Example of the gdal_grid call we generate::
gdal_grid -zfield "mmi" -a invdist:power=2.0:smoothing=1.0 \
-txe 126.29 130.29 -tye 0.802 4.798 -outsize 400 400 -of GTiff \
-ot Float16 -l mmi mmi.vrt mmi.tif
.. note:: It is assumed that gdal_grid is in your path.
:param force_flag: Whether to force the regeneration of the output
file. Defaults to False.
:type force_flag: bool
:param algorithm: Which re-sampling algorithm to use.
valid options are 'nearest' (for nearest neighbour), 'invdist'
(for inverse distance), 'average' (for moving average). Defaults
to 'nearest' if not specified. Note that passing re-sampling alg
parameters is currently not supported. If None is passed it will
be replaced with 'use_ascii'.
'use_ascii' algorithm will convert the mmi grid to ascii file
then convert it to raster using gdal_translate.
:type algorithm: str
:returns: Path to the resulting tif file.
:rtype: str
.. note:: For interest you can also make quite beautiful smoothed
raster using this:
gdal_grid -zfield "mmi" -a_srs EPSG:4326
-a invdist:power=2.0:smoothing=1.0 -txe 122.45 126.45
-tye -2.21 1.79 -outsize 400 400 -of GTiff
-ot Float16 -l mmi mmi.vrt mmi-trippy.tif |
22,393 | def lmx_h3k_f12k():
hparams = lmx_base()
hparams.hidden_size = 3072
hparams.filter_size = 12288
hparams.batch_size = 2048
hparams.weight_dtype = "bfloat16"
return hparams | HParams for training languagemodel_lm1b32k_packed. 880M Params. |
22,394 | def temporal_from_literal(text):
if text.count('/') == 1:
start, end = text.split('/')
return db.DateRange(
start=parse_dt(start).date(),
end=parse_dt(end).date()
)
else:
separators = text.count('-')
if separators == 0:
return db.DateRange(
start=date(int(text), 1, 1),
end=date(int(text), 12, 31)
)
elif separators == 1:
dt = parse_dt(text).date()
return db.DateRange(
start=dt.replace(day=1),
end=dt.replace(day=calendar.monthrange(dt.year, dt.month)[1])
) | Parse a temporal coverage from a literal ie. either:
- an ISO date range
- a single ISO date period (month,year) |
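Assuming the separators restored above ('/' between range bounds, '-' inside ISO dates), the behaviour would be:

temporal_from_literal('2014-01-01/2014-12-31')  # explicit ISO range
temporal_from_literal('2014')                   # whole year: 2014-01-01 .. 2014-12-31
temporal_from_literal('2014-06')                # whole month: 2014-06-01 .. 2014-06-30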
22,395 | def TrimBeginningAndEndingSlashes(path):
if path.startswith('/'):
path = path[1:]
if path.endswith('/'):
path = path[:-1]
return path | Trims beginning and ending slashes
:param str path:
:return:
Path with beginning and ending slashes trimmed.
:rtype: str |
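For example:

TrimBeginningAndEndingSlashes('/dbs/mydb/')   # -> 'dbs/mydb'
TrimBeginningAndEndingSlashes('dbs/mydb')     # -> 'dbs/mydb' (unchanged)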
22,396 | def overlay_gateway_enable_statistics_vlan_action(self, **kwargs):
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
enable = ET.SubElement(overlay_gateway, "enable")
statistics = ET.SubElement(enable, "statistics")
vlan_action = ET.SubElement(statistics, "vlan-action")
vlan_action.text = kwargs.pop('vlan_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
22,397 | def set_computer_policy(name,
setting,
cumulative_rights_assignments=True,
adml_language='en-US'):
pol = {}
pol[name] = setting
ret = set_(computer_policy=pol,
user_policy=None,
cumulative_rights_assignments=cumulative_rights_assignments,
adml_language=adml_language)
return ret | Set a single computer policy
Args:
name (str):
The name of the policy to configure
setting (str):
The setting to configure the named policy with
cumulative_rights_assignments (bool): Determine how user rights
assignment policies are configured. If True, user right assignment
specifications are simply added to the existing policy. If False,
only the users specified will get the right (any existing will have
the right revoked)
adml_language (str): The language files to use for looking up
Administrative Template policy data (i.e. how the policy is
displayed in the GUI). Defaults to 'en-US' (U.S. English).
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' lgpo.set_computer_policy LockoutDuration 1440 |
22,398 | def get_dict_from_json_file(path, encoding='utf-8'):
with open(path, encoding=encoding) as data_file:
return json.loads(data_file.read()) | Gets a dict of data form a json file.
:param path: the absolute path to the file
:param encoding: the encoding the file is in |
22,399 | def user_loc_value_to_class(axis_tag, user_loc):
if axis_tag == "wght":
return int(user_loc)
elif axis_tag == "wdth":
return min(
sorted(WIDTH_CLASS_TO_VALUE.items()),
key=lambda item: abs(item[1] - user_loc),
)[0]
raise NotImplementedError | Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2 |