def set_attrs(obj, attrs):
"""
Applies a collection of attributes C{attrs} to object C{obj} in the most
generic way possible.
@param obj: An instance implementing C{__setattr__}, or C{__setitem__}
@param attrs: A collection implementing the C{iteritems} function
@type attrs: Usually a dict
"""
o = setattr
if hasattr(obj, '__setitem__'):
o = type(obj).__setitem__
[o(obj, k, v) for k, v in attrs.iteritems()]
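# Example usage (hypothetical sketch, not part of the original module). Note
# that set_attrs relies on dict.iteritems, so this assumes Python 2; under
# Python 3 the loop above would need attrs.items() instead.
class _Config(object):
    pass

_cfg = _Config()
set_attrs(_cfg, {'host': 'localhost', 'port': 8080})    # falls back to __setattr__
_store = {}
set_attrs(_store, {'host': 'localhost', 'port': 8080})  # dispatches to dict.__setitem__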
def complete(self, match, subject_graph):
"""Check the completeness of a ring match"""
size = len(match)
# check whether we have an odd strong ring
if match.forward[size-1] in subject_graph.neighbors[match.forward[size-2]]:
# we have an odd closed cycle. check if this is a strong ring
order = list(range(0, size, 2)) + list(range(1, size-1, 2))[::-1]
ok = True
for i in range(len(order)//2):
# Count the number of paths between two opposite points in the
# ring. Since the ring has an odd number of vertices, each
# vertex has two semi-opposite vertices.
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count > 1:
ok = False
break
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2+1)%size]]
)))
if count > 1:
ok = False
break
if ok:
match.ring_vertices = tuple(match.forward[i] for i in order)
#print "RingPattern.complete: found odd ring"
return True
#print "RingPattern.complete: no odd ring"
# check whether we have an even strong ring
paths = list(subject_graph.iter_shortest_paths(
match.forward[size-1],
match.forward[size-2]
))
#print "RingPattern.complete: even paths", paths
if (size > 3 and len(paths) == 1 and len(paths[0]) == 3) or \
(size == 3 and len(paths) == 2 and len(paths[0]) == 3):
path = paths[0]
if size == 3 and path[1] == match.forward[0]:
path = paths[1]
# we have an even closed cycle. check if this is a strong ring
match.add_relation(size, path[1])
size += 1
order = list(range(0, size, 2)) + list(range(size-1, 0, -2))
ok = True
for i in range(len(order)//2):
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count != 2:
ok = False
break
if ok:
# also check if this does not violate the requirement for a
# unique origin:
if match.forward[size-1] < match.forward[0]:
ok = False
if not ok:
vertex1 = match.forward[size-1]
del match.forward[size-1]
del match.reverse[vertex1]
size -= 1
#print "RingPattern.complete: no even ring"
else:
match.ring_vertices = tuple(match.forward[i] for i in order)
#print "RingPattern.complete: found even ring"
return ok
#print "RingPattern.complete: not at all"
return False
async def eap_options(request: web.Request) -> web.Response:
""" Get request returns the available configuration options for WPA-EAP.
Because the options for connecting to WPA-EAP secured networks are quite
complex, to avoid duplicating logic this endpoint returns a json object
describing the structure of arguments and options for the eap_config arg to
/wifi/configure.
The object is shaped like this:
{
options: [ // Supported EAP methods and their options. One of these
// method names must be passed in the eapConfig dict
{
name: str // i.e. TTLS-EAPMSCHAPv2. Should be in the eapType
// key of eapConfig when sent to /configure.
options: [
{
name: str // i.e. "username"
displayName: str // i.e. "Username"
required: bool,
type: str
}
]
}
]
}
The ``type`` keys denote the semantic kind of the argument. Valid types
are:
password: This is some kind of password. It may be a psk for the network,
an Active Directory password, or the passphrase for a private key
string: A generic string; perhaps a username, or a subject-matches
domain name for server validation
file: A file that the user must provide. This should be the id of a
file previously uploaded via POST /wifi/keys.
Although the arguments are described hierarchically, they should be
specified in eap_config as a flat dict. For instance, a /configure
invocation for TTLS/EAP-TLS might look like
```
POST
{
ssid: "my-ssid",
securityType: "wpa-eap",
hidden: false,
eapConfig : {
eapType: "TTLS/EAP-TLS", // One of the method options
identity: "[email protected]", // And then its arguments
anonymousIdentity: "[email protected]",
password: "testing123",
caCert: "12d1f180f081b",
phase2CaCert: "12d1f180f081b",
phase2ClientCert: "009909fd9fa",
phase2PrivateKey: "081009fbcbc",
phase2PrivateKeyPassword: "testing321"
}
}
```
"""
return web.json_response(EAP_CONFIG_SHAPE, status=200)
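# Hypothetical illustration only: the real EAP_CONFIG_SHAPE constant is defined
# elsewhere in the module and may differ; this dict merely mirrors the shape
# documented in the docstring above.
_EAP_CONFIG_SHAPE_EXAMPLE = {
    'options': [
        {
            'name': 'TTLS-EAPMSCHAPv2',
            'options': [
                {'name': 'identity', 'displayName': 'Username',
                 'required': True, 'type': 'string'},
                {'name': 'password', 'displayName': 'Password',
                 'required': True, 'type': 'password'},
                {'name': 'caCert', 'displayName': 'CA Certificate',
                 'required': False, 'type': 'file'},
            ],
        },
    ],
}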
def description(pokemon):
"""Return a description of the given Pokemon."""
r = requests.get('http://pokeapi.co/' + list(pokemon.descriptions.values())[0])
# Parse the JSON body instead of eval()-ing untrusted response text.
desc = r.json()['description'].replace('Pokmon', 'Pokémon')
return desc
def versionok_for_gui():
''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration '''
# We require Python 2.6+ ...
if sys.hexversion < 0x02060000:
return False
# Or Python 3.2+
if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
return False
# Not supported under Jython nor IronPython
if sys.platform.startswith("java") or sys.platform.startswith('cli'):
return False
return True
def transform(self, X, y=None):
"""Transform the columns in ``X`` according to ``self.categories_``.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
X_trn : pandas.DataFrame or dask.DataFrame
Same type as the input. The columns in ``self.categories_`` will
be converted to categorical dtype.
"""
check_is_fitted(self, "categories_")
X = self._check_array(X).copy()
categories = self.categories_
for k, dtype in categories.items():
if _HAS_CTD:
if not isinstance(dtype, pd.api.types.CategoricalDtype):
dtype = pd.api.types.CategoricalDtype(*dtype)
X[k] = X[k].astype(dtype)
else:
cat, ordered = dtype
X[k] = X[k].astype("category").cat.set_categories(cat, ordered)
return X
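# Standalone sketch of the per-column conversion performed above (the _HAS_CTD
# branch), assuming a pandas version that provides CategoricalDtype.
import pandas as pd

_df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'n': [1, 2, 3]})
_categories = {'color': pd.api.types.CategoricalDtype(['blue', 'green', 'red'])}
for _col, _dtype in _categories.items():
    _df[_col] = _df[_col].astype(_dtype)
# _df['color'] now carries the fitted categories as a categorical dtype.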
def subtract_months(self, months: int) -> datetime:
""" Subtracts a number of months from the current value """
self.value = self.value - relativedelta(months=months)
return self.value
def take_at_least_n_seconds(time_s):
"""A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
# if Something and SomethingElse took 3 seconds, the with block will sleep
# for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time.
"""
timeout = PolledTimeout(time_s)
yield
while not timeout.has_expired():
time.sleep(timeout.remaining)
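# Self-contained sketch of the same pattern (the original generator is
# presumably wrapped with contextlib.contextmanager elsewhere); it uses
# time.monotonic in place of the PolledTimeout helper defined in the library.
import contextlib
import time

@contextlib.contextmanager
def _take_at_least_n_seconds_sketch(time_s):
    start = time.monotonic()
    yield
    remaining = time_s - (time.monotonic() - start)
    if remaining > 0:
        time.sleep(remaining)

with _take_at_least_n_seconds_sketch(2):
    pass  # the body finishes instantly, so the exit path sleeps ~2 seconds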
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def enable(self, cmd="enable", pattern=r"#", re_flags=re.IGNORECASE):
"""Enable mode on MRV uses no password."""
output = ""
if not self.check_enable_mode():
self.write_channel(self.normalize_cmd(cmd))
output += self.read_until_prompt_or_pattern(
pattern=pattern, re_flags=re_flags
)
if not self.check_enable_mode():
msg = (
"Failed to enter enable mode. Please ensure you pass "
"the 'secret' argument to ConnectHandler."
)
raise ValueError(msg)
return output
def update_repodata(self, channels=None):
"""Update repodata from channels or use condarc channels if None."""
norm_channels = self.conda_get_condarc_channels(channels=channels,
normalize=True)
repodata_urls = self._set_repo_urls_from_channels(norm_channels)
self._check_repos(repodata_urls)
def gifs_trending_get(self, api_key, **kwargs):
"""
Trending GIFs Endpoint
Fetch GIFs currently trending online. Hand curated by the GIPHY editorial team. The data returned mirrors the GIFs showcased on the <a href = \"http://www.giphy.com\">GIPHY homepage</a>. Returns 25 results by default.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gifs_trending_get(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param int limit: The maximum number of records to return.
:param str rating: Filters results by specified rating.
:param str fmt: Used to indicate the expected response format. Default is Json.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.gifs_trending_get_with_http_info(api_key, **kwargs)
else:
(data) = self.gifs_trending_get_with_http_info(api_key, **kwargs)
return data
def get_package_info(self, name): # type: (str) -> dict
"""
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
)
def t_offset(self, s):
r'[+]\d+'
pos = self.pos
self.add_token('OFFSET', s)
self.pos = pos + len(s)
def move(self, auth, resource, destinationresource, options={"aliases": True}, defer=False):
""" Moves a resource from one parent client to another.
Args:
auth: <cik>
resource: Identified resource to be moved.
destinationresource: resource of client resource is being moved to.
"""
return self._call('move', auth, [resource, destinationresource, options], defer)
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
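# Quick illustration of the recursion over nested containers:
assert in_nested_list([1, [2, (3, 4)], 5], 4)
assert not in_nested_list([1, [2, (3, 4)], 5], 6)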
def dump(self, validate=True):
"""
Create bencoded :attr:`metainfo` (i.e. the content of a torrent file)
:param bool validate: Whether to run :meth:`validate` first
:return: :attr:`metainfo` as bencoded :class:`bytes`
"""
if validate:
self.validate()
return bencode(self.convert())
def parse(response):
"""check for errors"""
if response.status_code == 400:
try:
msg = json.loads(response.content)['message']
except (KeyError, ValueError):
msg = ''
raise ApiError(msg)
return response
def stop_request(self, stop_now=False):
"""Send a stop request to the daemon
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: the daemon response (True)
"""
logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)
res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'})
return res
def install_var(instance, clear_target, clear_all):
"""Install required folders in /var"""
_check_root()
log("Checking frontend library and cache directories",
emitter='MANAGE')
uid = pwd.getpwnam("hfos").pw_uid
gid = grp.getgrnam("hfos").gr_gid
join = os.path.join
# If these need changes, make sure they are watertight and don't remove
# wanted stuff!
target_paths = (
'/var/www/challenges', # For LetsEncrypt acme certificate challenges
join('/var/lib/hfos', instance),
join('/var/local/hfos', instance),
join('/var/local/hfos', instance, 'backup'),
join('/var/cache/hfos', instance),
join('/var/cache/hfos', instance, 'tilecache'),
join('/var/cache/hfos', instance, 'rastertiles'),
join('/var/cache/hfos', instance, 'rastercache')
)
logfile = "/var/log/hfos-" + instance + ".log"
for item in target_paths:
if os.path.exists(item):
log("Path already exists: " + item)
if clear_all or (clear_target and 'cache' in item):
log("Cleaning up: " + item, lvl=warn)
shutil.rmtree(item)
if not os.path.exists(item):
log("Creating path: " + item)
os.mkdir(item)
os.chown(item, uid, gid)
# Touch logfile to make sure it exists
open(logfile, "a").close()
os.chown(logfile, uid, gid)
log("Done: Install Var") | Install required folders in /var |
def train(self, s, path="spelling.txt"):
""" Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor.
"""
model = {}
for w in re.findall("[a-z]+", s.lower()):
model[w] = w in model and model[w] + 1 or 1
model = ("%s %s" % (k, v) for k, v in sorted(model.items()))
model = "\n".join(model)
f = open(path, "w")
f.write(model)
f.close()
def unique_append(self, value):
""" function for only appending unique items to a list.
#! consider the possibility of item using this to a set
"""
if value not in self:
try:
super(self.__class__, self).append(Uri(value))
except AttributeError as err:
if isinstance(value, MODULE.rdfclass.RdfClassBase):
super(self.__class__, self).append(value)
else:
raise err
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False,
user_fields=None):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `retryable_write` (optional): True if this command is a retryable
write.
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
:Returns:
The result document.
"""
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write,
user_fields=user_fields)
def usePointsForInterpolation(self,cNrm,mNrm,interpolator):
'''
Constructs a basic solution for this period, including the consumption
function and marginal value function.
Parameters
----------
cNrm : np.array
(Normalized) consumption points for interpolation.
mNrm : np.array
(Normalized) corresponding market resource points for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
# Construct the unconstrained consumption function
cFuncNowUnc = interpolator(mNrm,cNrm)
# Combine the constrained and unconstrained functions into the true consumption function
cFuncNow = LowerEnvelope(cFuncNowUnc,self.cFuncNowCnst)
# Make the marginal value function and the marginal marginal value function
vPfuncNow = MargValueFunc(cFuncNow,self.CRRA)
# Pack up the solution and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow)
return solution_now
def _validate_authority_uri_abs_path(host, path):
"""Ensure that path in URL with authority starts with a leading slash.
Raise ValueError if not.
"""
if len(host) > 0 and len(path) > 0 and not path.startswith("/"):
raise ValueError(
"Path in a URL with authority " "should start with a slash ('/') if set"
)
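# Illustration: a non-empty host with a relative path is rejected.
_validate_authority_uri_abs_path("example.com", "/index.html")   # passes silently
try:
    _validate_authority_uri_abs_path("example.com", "index.html")
except ValueError as exc:
    print(exc)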
def _run_command(argv):
# type: (typing.List[str]) -> typing.Any
"""Run the command with the given CLI options and exit.
Command functions are expected to have a __doc__ string that is parseable
by docopt.
Args:
argv: The list of command line arguments supplied for a command. The
first argument is expected to be the name of the command to be run.
Note that this is different than the full arguments parsed by
docopt for the entire program.
Raises:
ValueError: Raised if the user attempted to run an invalid command.
"""
command_name, argv = _get_command_and_argv(argv)
_LOGGER.info('Running command "%s %s" with args: %s', settings.command,
command_name, argv)
subcommand = _get_subcommand(command_name)
func = call.get_callable(subcommand)
doc = usage.format_usage(subcommand.__doc__)
args = _get_parsed_args(command_name, doc, argv)
return call.call(func, args) or 0
def post(self, text=None, attachments=None, source_guid=None):
"""Post a direct message to the user.
:param str text: the message content
:param attachments: message attachments
:param str source_guid: a client-side unique ID for the message
:return: the message sent
:rtype: :class:`~groupy.api.messages.DirectMessage`
"""
return self.messages.create(text=text, attachments=attachments,
source_guid=source_guid)
def _mmComputeSequenceRepresentationData(self):
"""
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do need to be recomputed for calls to each of several accessor
methods that use these values.
"""
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False
def predefinedEntity(name):
"""Check whether this name is an predefined entity. """
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret)
def get_work_items_batch(self, work_item_get_request, project=None):
"""GetWorkItemsBatch.
Gets work items for a list of work item ids (Maximum 200)
:param :class:`<WorkItemBatchGetRequest> <azure.devops.v5_0.work_item_tracking.models.WorkItemBatchGetRequest>` work_item_get_request:
:param str project: Project ID or project name
:rtype: [WorkItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(work_item_get_request, 'WorkItemBatchGetRequest')
response = self._send(http_method='POST',
location_id='908509b6-4248-4475-a1cd-829139ba419f',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def _finish(self, update_ops, name_scope):
"""Updates beta_power variables every n batches and incrs counter."""
iter_ = self._get_iter_variable()
beta1_power, beta2_power = self._get_beta_accumulators()
with tf.control_dependencies(update_ops):
with tf.colocate_with(iter_):
def update_beta_op():
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t,
use_locking=self._use_locking)
return tf.group(update_beta1, update_beta2)
maybe_update_beta = tf.cond(
tf.equal(iter_, 0), update_beta_op, tf.no_op)
with tf.control_dependencies([maybe_update_beta]):
update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
use_locking=self._use_locking)
return tf.group(
*update_ops + [update_iter, maybe_update_beta], name=name_scope)
def grant_client(self, client_id, read=True, write=True):
"""
Grant the given client id all the scopes and authorities
needed to work with the timeseries service.
"""
scopes = ['openid']
authorities = ['uaa.resource']
if write:
for zone in self.service.settings.data['ingest']['zone-token-scopes']:
scopes.append(zone)
authorities.append(zone)
if read:
for zone in self.service.settings.data['query']['zone-token-scopes']:
scopes.append(zone)
authorities.append(zone)
self.service.uaa.uaac.update_client_grants(client_id, scope=scopes,
authorities=authorities)
return self.service.uaa.uaac.get_client(client_id)
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
'''
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
'''
# NOTE this method is inefficient and uses code that's not of the highest quality
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
row_builder = lambda s, r: list(s.row_values(r))
return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
def event_notify(self, event):
"""
Notify all subscribers of an event.
event -- the event that occurred
"""
if event.name not in self.available_events:
return
message = json.dumps({
'messageType': 'event',
'data': event.as_event_description(),
})
for subscriber in self.available_events[event.name]['subscribers']:
try:
subscriber.write_message(message)
except tornado.websocket.WebSocketClosedError:
pass
def has_event(self, event, cameo_code):
"""
Test whether there is an "event2" or "event3" entry for the given cameo code
Args:
event:
cameo_code:
Returns:
"""
if self.has_cameo_code(cameo_code):
entry = self.mapping.get(cameo_code)
if entry:
return entry[self.event_name[event]]
return False
def add_call(self, func):
"""Add a call to the trace."""
self.trace.append("{} ({}:{})".format(
object_name(func),
inspect.getsourcefile(func),
inspect.getsourcelines(func)[1]))
return self
def app_size(self):
"Return the total apparent size, including children."
if self._nodes is None:
return self._app_size
return sum(i.app_size() for i in self._nodes)
def to_dict(self):
"""Convert attribute definition into a dictionary.
Returns
-------
dict
Json-like dictionary representation of the attribute definition
"""
obj = {
'id' : self.identifier,
'name' : self.name,
'description' : self.description,
'type' : self.data_type.to_dict()
}
if not self.default is None:
obj['default'] = self.default
return obj
async def _buffer_body(self, reader):
"""
Buffers the body of the request
"""
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining)
except asyncio.IncompleteReadError:
raise EOFError()
def _get_default_arg(args, defaults, arg_index):
""" Method that determines if an argument has default value or not,
and if yes what is the default value for the argument
:param args: array of arguments, eg: ['first_arg', 'second_arg', 'third_arg']
:param defaults: array of default values, eg: (42, 'something')
:param arg_index: index of the argument in the argument array for which,
this function checks if a default value exists or not. And if default value
exists it would return the default value. Example argument: 1
:return: Tuple of whether there is a default or not, and if yes the default
value, eg: for index 1 i.e. for "second_arg" this function returns (True, 42)
"""
if not defaults:
return DefaultArgSpec(False, None)
args_with_no_defaults = len(args) - len(defaults)
if arg_index < args_with_no_defaults:
return DefaultArgSpec(False, None)
else:
value = defaults[arg_index - args_with_no_defaults]
if (type(value) is str):
value = '"%s"' % value
return DefaultArgSpec(True, value)
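# Hypothetical usage sketch; DefaultArgSpec is assumed to be a namedtuple of
# (has_default, default_value) defined alongside this helper.
import inspect

def _sample(first_arg, second_arg=42, third_arg='something'):
    pass

_spec = inspect.getfullargspec(_sample)
for _i, _name in enumerate(_spec.args):
    print(_name, _get_default_arg(_spec.args, _spec.defaults, _i))
# first_arg  -> (False, None)
# second_arg -> (True, 42)
# third_arg  -> (True, '"something"')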
def draw_simple_elevation(world, sea_level, target):
""" This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI)
"""
e = world.layers['elevation'].data
c = numpy.empty(e.shape, dtype=numpy.float)
has_ocean = not (sea_level is None or world.layers['ocean'].data is None or not world.layers['ocean'].data.any()) # or 'not any ocean'
mask_land = numpy.ma.array(e, mask=world.layers['ocean'].data if has_ocean else False) # only land
min_elev_land = mask_land.min()
max_elev_land = mask_land.max()
elev_delta_land = (max_elev_land - min_elev_land) / 11.0
if has_ocean:
land = numpy.logical_not(world.layers['ocean'].data)
mask_ocean = numpy.ma.array(e, mask=land) # only ocean
min_elev_sea = mask_ocean.min()
max_elev_sea = mask_ocean.max()
elev_delta_sea = max_elev_sea - min_elev_sea
c[world.layers['ocean'].data] = ((e[world.layers['ocean'].data] - min_elev_sea) / elev_delta_sea)
c[land] = ((e[land] - min_elev_land) / elev_delta_land) + 1
else:
c = ((e - min_elev_land) / elev_delta_land) + 1
for y in range(world.height):
for x in range(world.width):
r, g, b = elevation_color(c[y, x], sea_level)
target.set_pixel(x, y, (int(r * 255), int(g * 255),
int(b * 255), 255))
def _index_param_value(num_samples, v, indices):
"""Private helper function for parameter value indexing.
This determines whether a fit parameter `v` to a SearchCV.fit
should be indexed along with `X` and `y`. Note that this differs
from the scikit-learn version. They pass `X` and compute num_samples.
We pass `num_samples` instead.
"""
if not _is_arraylike(v) or _num_samples(v) != num_samples:
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
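# Illustration of the two branches: array-like parameters the same length as
# the data are indexed alongside it, anything else passes through untouched.
import numpy as np

_y = np.array([0, 1, 0, 1, 0])
_weights = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
_idx = np.array([0, 2, 4])
print(_index_param_value(len(_y), _weights, _idx))  # -> [1. 3. 5.]
print(_index_param_value(len(_y), 3.14, _idx))      # scalar: passed through unchanged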
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def update_firmware(filename,
host=None,
admin_username=None,
admin_password=None):
'''
Updates firmware using local firmware file
.. code-block:: bash
salt dell dracr.update_firmware firmware.exe
This executes the following command on your FX2
(using username and password stored in the pillar data)
.. code-block:: bash
racadm update -f firmware.exe -u user -p pass
'''
if os.path.exists(filename):
return _update_firmware('update -f {0}'.format(filename),
host=host,
admin_username=admin_username,
admin_password=admin_password)
else:
raise CommandExecutionError('Unable to find firmware file {0}'
.format(filename))
def format(sql, args=None):
""" Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure.
"""
resolved_vars = {}
code = []
SqlStatement._find_recursive_dependencies(sql, args, code=code,
resolved_vars=resolved_vars)
# Rebuild the SQL string, substituting just '$' for escaped $ occurrences,
# variable references substituted with their values, or literal text copied
# over as-is.
parts = []
for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):
if escape:
parts.append('$')
elif placeholder:
variable = placeholder[1:]
try:
value = resolved_vars[variable]
except KeyError as e:
raise Exception('Invalid sql. Unable to substitute $%s.' % e.args[0])
if isinstance(value, types.ModuleType):
value = _utils.get_default_query_from_module(value)
if isinstance(value, SqlStatement):
sql = value.format(value._sql, resolved_vars)
value = '(%s)' % sql
elif '_repr_sql_' in dir(value):
# pylint: disable=protected-access
value = value._repr_sql_()
elif isinstance(value, basestring):
value = SqlStatement._escape_string(value)
elif isinstance(value, list) or isinstance(value, tuple):
if isinstance(value, tuple):
value = list(value)
expansion = '('
for v in value:
if len(expansion) > 1:
expansion += ', '
if isinstance(v, basestring):
expansion += SqlStatement._escape_string(v)
else:
expansion += str(v)
expansion += ')'
value = expansion
else:
value = str(value)
parts.append(value)
elif literal:
parts.append(literal)
expanded = ''.join(parts)
return expanded
def _by_columns(self, columns):
"""
Allow select.group and select.order accepting string and list
"""
return columns if self.isstr(columns) else self._backtick_columns(columns)
def read_pipe(pipe_out):
"""Read data on a pipe
Used to capture stdout data produced by libiperf
:param pipe_out: The os pipe_out
:rtype: unicode string
"""
out = b''
while more_data(pipe_out):
out += os.read(pipe_out, 1024)
return out.decode('utf-8')
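# Minimal sketch of the plumbing this helper expects on a POSIX system. The
# more_data() helper used above is defined elsewhere in the module; a plausible
# select-based version is included here purely for illustration.
import os
import select

def more_data(fd):
    readable, _, _ = select.select([fd], [], [], 0)
    return bool(readable)

_pipe_out, _pipe_in = os.pipe()
os.write(_pipe_in, b"stdout captured from libiperf")
print(read_pipe(_pipe_out))  # -> "stdout captured from libiperf"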
def _bounds_dist(self, p):
"""Get the lower and upper bound distances. Negative is bad."""
prob = self.problem
lb_dist = (p - prob.variable_bounds[0, ]).min()
ub_dist = (prob.variable_bounds[1, ] - p).min()
if prob.bounds.shape[0] > 0:
const = prob.inequalities.dot(p)
const_lb_dist = (const - prob.bounds[0, ]).min()
const_ub_dist = (prob.bounds[1, ] - const).min()
lb_dist = min(lb_dist, const_lb_dist)
ub_dist = min(ub_dist, const_ub_dist)
return np.array([lb_dist, ub_dist])
def update_received_packet(self, received_pkt_size_bytes):
"""Update received packet metrics"""
self.update_count(self.RECEIVED_PKT_COUNT)
self.update_count(self.RECEIVED_PKT_SIZE, incr_by=received_pkt_size_bytes)
def get(self, id, **options):
'''Get a single item with the given ID'''
if not self._item_path:
raise AttributeError('get is not available for %s' % self._item_name)
target = self._item_path % id
json_data = self._redmine.get(target, **options)
data = self._redmine.unwrap_json(self._item_type, json_data)
data['_source_path'] = target
return self._objectify(data=data)
def _dy_shapelets(self, shapelets, beta):
"""
computes the derivative d/dy of the shapelet coeffs
:param shapelets:
:param beta:
:return:
"""
num_n = len(shapelets)
dy = np.zeros((num_n+1, num_n+1))
for n1 in range(num_n):
for n2 in range(num_n):
amp = shapelets[n1][n2]
dy[n1][n2+1] -= np.sqrt((n2+1)/2.) * amp
if n2 > 0:
dy[n1][n2-1] += np.sqrt(n2/2.) * amp
return dy/beta
def disable_share(cookie, tokens, shareid_list):
'''Cancel sharing.
shareid_list is a list; each item is a shareid.
'''
url = ''.join([
const.PAN_URL,
'share/cancel?channel=chunlei&clienttype=0&web=1',
'&bdstoken=', tokens['bdstoken'],
])
data = 'shareid_list=' + encoder.encode_uri(json.dumps(shareid_list))
req = net.urlopen(url, headers={
'Cookie': cookie.header_output(),
'Content-type': const.CONTENT_FORM_UTF8,
}, data=data.encode())
if req:
content = req.data
return json.loads(content.decode())
else:
return None
def getAceTypeBit(self, t):
'''
returns the acetype bit of a text value
'''
try:
return self.validAceTypes[t]['BITS']
except KeyError:
raise CommandExecutionError((
'No ACE type "{0}". It should be one of the following: {1}'
).format(t, ', '.join(self.validAceTypes)))
def logs(self, pod=None):
""" Logs from a worker pod
You can get this pod object from the ``pods`` method.
If no pod is specified all pod logs will be returned. On large clusters
this could end up being rather large.
Parameters
----------
pod: kubernetes.client.V1Pod
The pod from which we want to collect logs.
See Also
--------
KubeCluster.pods
Client.get_worker_logs
"""
if pod is None:
return {pod.status.pod_ip: self.logs(pod) for pod in self.pods()}
return self.core_api.read_namespaced_pod_log(pod.metadata.name,
pod.metadata.namespace)
def latitude(self):
'''Latitude in signed degrees (python float)'''
sd = dm_to_sd(self.lat)
if self.lat_dir == 'N':
return +sd
elif self.lat_dir == 'S':
return -sd
else:
return 0.
def __calculate_bu_dfs_recursively(u, b, dfs_data):
"""Calculates the b(u) lookup table with a recursive DFS."""
first_time = True
for v in dfs_data['adj'][u]:
if a(v, dfs_data) == u:
if first_time:
b[v] = b[u]
else:
b[v] = D(u, dfs_data)
__calculate_bu_dfs_recursively(v, b, dfs_data)
first_time = False
def _get_unitary(self):
"""Return the current unitary in JSON Result spec format"""
unitary = np.reshape(self._unitary, 2 * [2 ** self._number_of_qubits])
# Expand complex numbers
unitary = np.stack((unitary.real, unitary.imag), axis=-1)
# Truncate small values
unitary[abs(unitary) < self._chop_threshold] = 0.0
return unitary
def estimate_K_knee(self, th=.015, maxK=12):
"""Estimates the K using K-means and BIC, by sweeping various K and
choosing the optimal BIC."""
# Sweep K-means
if self.X.shape[0] < maxK:
maxK = self.X.shape[0]
if maxK < 2:
maxK = 2
K = np.arange(1, maxK)
bics = []
for k in K:
means, labels = self.run_kmeans(self.X, k)
bic = self.compute_bic(self.X, means, labels, K=k,
R=self.X.shape[0])
bics.append(bic)
diff_bics = np.diff(bics)
finalK = K[-1]
if len(bics) == 1:
finalK = 2
else:
# Normalize
bics = np.asarray(bics)
bics -= bics.min()
#bics /= bics.max()
diff_bics -= diff_bics.min()
#diff_bics /= diff_bics.max()
#print bics, diff_bics
# Find optimum K
for i in range(len(K[:-1])):
#if bics[i] > diff_bics[i]:
if diff_bics[i] < th and K[i] != 1:
finalK = K[i]
break
#print "Estimated K: ", finalK
if self.plot:
plt.subplot(2, 1, 1)
plt.plot(K, bics, label="BIC")
plt.plot(K[:-1], diff_bics, label="BIC diff")
plt.legend(loc=2)
plt.subplot(2, 1, 2)
plt.scatter(self.X[:, 0], self.X[:, 1])
plt.show()
return finalK
def validate_query(self, using=None, **kwargs):
"""
Validate a potentially expensive query without executing it.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.validate_query`` unchanged.
"""
return self._get_connection(using).indices.validate_query(index=self._name, **kwargs)
def jprecess(ra, dec, mu_radec=None, parallax=None, rad_vel=None, epoch=None):
"""
NAME:
JPRECESS
PURPOSE:
Precess astronomical coordinates from B1950 to J2000
EXPLANATION:
Calculate the mean place of a star at J2000.0 on the FK5 system from the
mean place at B1950.0 on the FK4 system.
Use BPRECESS for the reverse direction J2000 ==> B1950
CALLING SEQUENCE:
jprecess, ra, dec, ra_2000, dec_2000, [ MU_RADEC = , PARALLAX =
RAD_VEL =, EPOCH = ]
INPUTS:
RA,DEC - input B1950 right ascension and declination in *degrees*.
Scalar or vector
OUTPUTS:
RA_2000, DEC_2000 - the corresponding J2000 right ascension and
declination in *degrees*. Same number of elements as RA,DEC
but always double precision.
OPTIONAL INPUT-OUTPUT KEYWORDS
MU_RADEC - 2xN element double precision vector containing the proper
motion in seconds of arc per tropical *century* in right
ascension and declination.
PARALLAX - N_element vector giving stellar parallax (seconds of arc)
RAD_VEL - N_element vector giving radial velocity in km/s
The values of MU_RADEC, PARALLAX, and RADVEL will all be modified
upon output to contain the values of these quantities in the
J2000 system. Values will also be converted to double precision.
The parallax and radial velocity will have a very minor influence on
the J2000 position.
EPOCH - scalar giving epoch of original observations, default 1950.0d
This keyword value is only used if the MU_RADEC keyword is not set.
NOTES:
The algorithm is taken from the Explanatory Supplement to the
Astronomical Almanac 1992, page 184.
Also see Aoki et al (1983), A&A, 128,263
JPRECESS distinguishes between the following two cases:
(1) The proper motion is known and non-zero
(2) the proper motion is unknown or known to be exactly zero (i.e.
extragalactic radio sources). In this case, the algorithm
in Appendix 2 of Aoki et al. (1983) is used to ensure that
the output proper motion is exactly zero. Better precision
can be achieved in this case by inputting the EPOCH of the
original observations.
The error in using the IDL procedure PRECESS for converting between
B1950 and J2000 can be up to 12", mainly in right ascension. If
better accuracy than this is needed then JPRECESS should be used.
EXAMPLE:
The SAO catalogue gives the B1950 position and proper motion for the
star HD 119288. Find the J2000 position.
RA(1950) = 13h 39m 44.526s Dec(1950) = 8d 38' 28.63''
Mu(RA) = -.0259 s/yr Mu(Dec) = -.093 ''/yr
IDL> mu_radec = 100D* [ -15D*.0259, -0.093 ]
IDL> ra = ten(13,39,44.526)*15.D
IDL> dec = ten(8,38,28.63)
IDL> jprecess, ra, dec, ra2000, dec2000, mu_radec = mu_radec
IDL> print, adstring(ra2000, dec2000,2)
===> 13h 42m 12.740s +08d 23' 17.69"
RESTRICTIONS:
"When transferring individual observations, as opposed to catalog mean
place, the safest method is to transform the observations back to the
epoch of the observation, on the FK4 system (or in the system that was
used to produce the observed mean place), convert to the FK5 system,
and transform to the epoch and equinox of J2000.0" -- from the
Explanatory Supplement (1992), p. 180
REVISION HISTORY:
Written, W. Landsman September, 1992
Corrected a couple of typos in M matrix October, 1992
Vectorized, W. Landsman February, 1994
Implement Appendix 2 of Aoki et al. (1983) for case where proper
motion unknown or exactly zero W. Landsman November, 1994
Converted to IDL V5.0 W. Landsman September 1997
Fixed typo in updating proper motion W. Landsman April 1999
Make sure proper motion is floating point W. Landsman December 2000
Use V6.0 notation W. Landsman Mar 2011
Converted to python by A. Drlica-Wagner Feb 2014
"""
if isinstance(ra, ndarray):
ra = array(ra)
dec = array(dec)
else:
ra = array([ra])
dec = array([dec])
n = ra.size
if rad_vel is None:
rad_vel = zeros(n,dtype=float)
else:
if not isinstance(rad_vel, ndarray):
rad_vel = array([rad_vel],dtype=float)
if rad_vel.size != n:
raise Exception('ERROR - RAD_VEL keyword vector must be of the same length as RA and DEC')
if (mu_radec is not None):
if (array(mu_radec).size != 2 * n):
raise Exception('ERROR - MU_RADEC keyword (proper motion) must be dimensioned (2,' + str(n) + ')')
mu_radec = mu_radec * 1.
if parallax is None:
parallax = zeros(n,dtype=float)
else:
if not isinstance(parallax, ndarray):
parallax = array([parallax],dtype=float)
if epoch is None:
epoch = 1950.0e0
radeg = 180.e0 / pi
sec_to_radian = 1/radeg/3600.
#sec_to_radian = lambda x : deg2rad(x/3600.)
m = array([
array([+0.9999256782e0, +0.0111820610e0, +0.0048579479e0, \
-0.000551e0, +0.238514e0, -0.435623e0 ]),
array([ -0.0111820611e0, +0.9999374784e0, -0.0000271474e0, \
-0.238565e0, -0.002667e0, +0.012254e0 ]),
array([ -0.0048579477e0, -0.0000271765e0, +0.9999881997e0 , \
+0.435739e0, -0.008541e0, +0.002117e0 ]),
array([ +0.00000242395018e0, +0.00000002710663e0, +0.00000001177656e0, \
+0.99994704e0, +0.01118251e0, +0.00485767e0 ]),
array([ -0.00000002710663e0, +0.00000242397878e0, -0.00000000006582e0, \
-0.01118251e0, +0.99995883e0, -0.00002714e0]),
array([ -0.00000001177656e0, -0.00000000006587e0, 0.00000242410173e0, \
-0.00485767e0, -0.00002718e0, 1.00000956e0 ]) ])
a = 1e-6 * array([ -1.62557e0, -0.31919e0, -0.13843e0]) #in radians
a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0 ]) #in arc seconds per century
ra_rad = deg2rad(ra)
dec_rad = deg2rad(dec)
cosra = cos(ra_rad)
sinra = sin(ra_rad)
cosdec = cos(dec_rad)
sindec = sin(dec_rad)
ra_2000 = ra*0.
dec_2000 = dec*0.
for i in range(n):
r0 = array([ cosra[i]*cosdec[i], sinra[i]*cosdec[i], sindec[i] ])
if (mu_radec is None):
mu_a = 0.
mu_d = 0.
else:
mu_a = mu_radec[ i, 0]
mu_d = mu_radec[ i, 1]
#Velocity vector
r0_dot = array([-mu_a*sinra[i]*cosdec[i] - mu_d*cosra[i]*sindec[i], \
mu_a*cosra[i]*cosdec[i] - mu_d*sinra[i]*sindec[i] , \
mu_d*cosdec[i] ]) + 21.095e0 * rad_vel[i] * parallax[i] * r0
r1 = r0 - a + ((a * r0).sum())*r0
r1_dot = r0_dot - a_dot + (( a * r0).sum())*r0
r_1 = concatenate((r1, r1_dot))
r = transpose(dot(transpose(m),transpose(r_1)))
if mu_radec is None:
rr = r[0:3]
v = r[3:6]
t = ((epoch - 1950.0e0) - 50.00021e0)/100.0
rr1 = rr + sec_to_radian*v*t
x = rr1[0] ; y = rr1[1] ; z = rr1[2]
else:
x = r[0] ; y = r[1] ; z = r[2]
x_dot = r[3] ; y_dot= r[4] ; z_dot = r[5]
r2 = x**2 + y**2 + z**2
rmag = sqrt( r2 )
dec_2000[i] = arcsin(z / rmag)
ra_2000[i] = arctan2(y, x)
if mu_radec is not None:
mu_radec[i, 0] = ( x*y_dot - y*x_dot) / ( x**2 + y**2)
mu_radec[i, 1] = ( z_dot* (x**2 + y**2) - z*(x*x_dot + y*y_dot) ) / \
( r2*sqrt( x**2 + y**2) )
if parallax[i] > 0.:
rad_vel[i] = ( x*x_dot + y*y_dot + z*z_dot )/ (21.095*parallax[i]*rmag)
parallax[i] = parallax[i] / rmag
neg = (ra_2000 < 0)
if neg.any() > 0:
ra_2000[neg] = ra_2000[neg] + 2.0 * pi
ra_2000 = ra_2000*radeg ; dec_2000 = dec_2000*radeg
if ra.size == 1:
ra_2000 = ra_2000[0] ; dec_2000 = dec_2000[0]
return ra_2000, dec_2000 | NAME:
JPRECESS
PURPOSE:
Precess astronomical coordinates from B1950 to J2000
EXPLANATION:
Calculate the mean place of a star at J2000.0 on the FK5 system from the
mean place at B1950.0 on the FK4 system.
Use BPRECESS for the reverse direction J2000 ==> B1950
CALLING SEQUENCE:
jprecess, ra, dec, ra_2000, dec_2000, [ MU_RADEC = , PARALLAX =
RAD_VEL =, EPOCH = ]
INPUTS:
RA,DEC - input B1950 right ascension and declination in *degrees*.
Scalar or vector
OUTPUTS:
RA_2000, DEC_2000 - the corresponding J2000 right ascension and
declination in *degrees*. Same number of elements as RA,DEC
but always double precision.
OPTIONAL INPUT-OUTPUT KEYWORDS
MU_RADEC - 2xN element double precision vector containing the proper
motion in seconds of arc per tropical *century* in right
ascension and declination.
PARALLAX - N_element vector giving stellar parallax (seconds of arc)
RAD_VEL - N_element vector giving radial velocity in km/s
The values of MU_RADEC, PARALLAX, and RADVEL will all be modified
upon output to contain the values of these quantities in the
J2000 system. Values will also be converted to double precision.
The parallax and radial velocity will have a very minor influence on
the J2000 position.
EPOCH - scalar giving epoch of original observations, default 1950.0d
This keyword value is only used if the MU_RADEC keyword is not set.
NOTES:
The algorithm is taken from the Explanatory Supplement to the
Astronomical Almanac 1992, page 184.
Also see Aoki et al (1983), A&A, 128,263
JPRECESS distinguishes between the following two cases:
(1) The proper motion is known and non-zero
(2) the proper motion is unknown or known to be exactly zero (i.e.
extragalactic radio sources). In this case, the algorithm
in Appendix 2 of Aoki et al. (1983) is used to ensure that
the output proper motion is exactly zero. Better precision
can be achieved in this case by inputting the EPOCH of the
original observations.
The error in using the IDL procedure PRECESS for converting between
B1950 and J2000 can be up to 12", mainly in right ascension. If
better accuracy than this is needed then JPRECESS should be used.
EXAMPLE:
The SAO catalogue gives the B1950 position and proper motion for the
star HD 119288. Find the J2000 position.
RA(1950) = 13h 39m 44.526s Dec(1950) = 8d 38' 28.63''
Mu(RA) = -.0259 s/yr Mu(Dec) = -.093 ''/yr
IDL> mu_radec = 100D* [ -15D*.0259, -0.093 ]
IDL> ra = ten(13,39,44.526)*15.D
IDL> dec = ten(8,38,28.63)
IDL> jprecess, ra, dec, ra2000, dec2000, mu_radec = mu_radec
IDL> print, adstring(ra2000, dec2000,2)
===> 13h 42m 12.740s +08d 23' 17.69"
RESTRICTIONS:
"When transferring individual observations, as opposed to catalog mean
place, the safest method is to transform the observations back to the
epoch of the observation, on the FK4 system (or in the system that was
used to produce the observed mean place), convert to the FK5 system,
and transform to the epoch and equinox of J2000.0" -- from the
Explanatory Supplement (1992), p. 180
REVISION HISTORY:
Written, W. Landsman September, 1992
Corrected a couple of typos in M matrix October, 1992
Vectorized, W. Landsman February, 1994
Implement Appendix 2 of Aoki et al. (1983) for case where proper
motion unknown or exactly zero W. Landsman November, 1994
Converted to IDL V5.0 W. Landsman September 1997
Fixed typo in updating proper motion W. Landsman April 1999
Make sure proper motion is floating point W. Landsman December 2000
Use V6.0 notation W. Landsman Mar 2011
Converted to python by A. Drlica-Wagner Feb 2014 |
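A hedged Python counterpart of the IDL example above, as a sketch only: it assumes the full jprecess signature mirrors the IDL keywords (mu_radec, parallax, rad_vel, epoch) and that omitted parallax and radial velocity default to zero as in the IDL routine.
import numpy as np

# HD 119288: B1950 position and proper motion from the SAO catalogue
ra_1950 = (13 + 39 / 60. + 44.526 / 3600.) * 15.0    # degrees
dec_1950 = 8 + 38 / 60. + 28.63 / 3600.               # degrees
mu_radec = np.array([[100. * (-15. * 0.0259),         # arcsec per tropical century (RA)
                      100. * (-0.093)]])              # arcsec per tropical century (Dec)
ra_2000, dec_2000 = jprecess(ra_1950, dec_1950, mu_radec=mu_radec)
# expected to land near 13h 42m 12.740s, +08d 23' 17.69"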
def equals(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0 | Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths. |
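A minimal usage sketch for the constant-time comparison above; the token values are made up for illustration:
expected = "3f7a9c1d"   # e.g. a stored API token or HMAC hex digest
supplied = "3f7a9c1e"   # value received from the client
if equals(expected, supplied):
    print("token accepted")
else:
    print("token rejected")   # timing does not reveal how long the matching prefix was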
def _remove_code(site):
"""
Delete project files
@type site: Site
"""
def handle_error(function, path, excinfo):
click.secho('Failed to remove path ({em}): {p}'.format(em=excinfo.message, p=path), err=True, fg='red')
if os.path.exists(site.root):
shutil.rmtree(site.root, onerror=handle_error) | Delete project files
@type site: Site |
def _create_ucsm_host_to_service_profile_mapping(self):
"""Reads list of Service profiles and finds associated Server."""
# Get list of UCSMs without host list given in the config
ucsm_ips = [ip for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if not ucsm.ucsm_host_list]
for ucsm_ip in ucsm_ips:
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
try:
sp_list = handle.query_classid('lsServer')
if sp_list is not None:
for sp in sp_list:
if sp.pn_dn:
server_name = handle.query_dn(sp.pn_dn).name
if (server_name and not
sp.oper_src_templ_name):
LOG.debug('Server %s info retrieved '
'from UCSM %s', server_name, ucsm_ip)
key = (ucsm_ip, server_name)
self.ucsm_sp_dict[key] = str(sp.dn)
self.ucsm_host_dict[server_name] = ucsm_ip
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e) | Reads list of Service profiles and finds associated Server. |
def calculate_lvgd_stats(nw):
"""
LV Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
lvgd_stats : pandas.DataFrame
Dataframe containing several statistical numbers about the LVGD
"""
##############################
# ETRS (equidistant) to WGS84 (conformal) projection
proj = partial(
pyproj.transform,
# pyproj.Proj(init='epsg:3035'), # source coordinate system
# pyproj.Proj(init='epsg:4326')) # destination coordinate system
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
##############################
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
lv_dist_idx = 0
lv_dist_dict = {}
lv_gen_idx = 0
lv_gen_dict = {}
lv_load_idx = 0
lv_load_dict = {}
branch_idx = 0
branches_dict = {}
trafos_idx = 0
trafos_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
for lv_district in LA.lv_grid_districts():
lv_dist_idx += 1
branches_from_station = len(lv_district.lv_grid.graph_branches_from_node(lv_district.lv_grid.station()))
lv_dist_dict[lv_dist_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'Load Area ID': LA.id_db,
'Population': lv_district.population,
'Peak Load Residential': lv_district.peak_load_residential,
'Peak Load Retail': lv_district.peak_load_retail,
'Peak Load Industrial': lv_district.peak_load_industrial,
'Peak Load Agricultural': lv_district.peak_load_agricultural,
'N° of Sector Residential': lv_district.sector_count_residential,
'N° of Sector Retail': lv_district.sector_count_retail,
'N° of Sector Industrial': lv_district.sector_count_industrial,
'N° of Sector Agricultural': lv_district.sector_count_agricultural,
'Accum. Consumption Residential': lv_district.sector_consumption_residential,
'Accum. Consumption Retail': lv_district.sector_consumption_retail,
'Accum. Consumption Industrial': lv_district.sector_consumption_industrial,
'Accum. Consumption Agricultural': lv_district.sector_consumption_agricultural,
'N° of branches from LV Station': branches_from_station,
'Load Area is Aggregated': LA.is_aggregated,
'Load Area is Satellite': LA.is_satellite,
}
# generation capacity
for g in lv_district.lv_grid.generators():
lv_gen_idx += 1
subtype = g.subtype
if subtype is None:
subtype = 'other'
type = g.type
if type is None:
type = 'other'
lv_gen_dict[lv_gen_idx] = {
'LV_grid_id': lv_district.lv_grid.id_db,
'v_level': g.v_level,
'subtype': type + '/' + subtype,
'GenCap': g.capacity,
}
# nodes bzw. LV loads
for node in lv_district.lv_grid.graph_nodes_sorted():
if isinstance(node, LVLoadDing0):
lv_load_idx += 1
if 'agricultural' in node.consumption:
tipo = 'agricultural'
elif 'industrial' in node.consumption:
tipo = 'ind_ret'
elif 'residential' in node.consumption:
tipo = 'residential'
else:
tipo = 'none'
lv_load_dict[lv_load_idx] = {
'LV_grid_id': lv_district.lv_grid.id_db,
'load_type': tipo,
}
# branches
for branch in lv_district.lv_grid.graph_edges():
branch_idx += 1
branches_dict[branch_idx] = {
'LV_grid_id': lv_district.lv_grid.id_db,
'length': branch['branch'].length / 1e3,
'type_name': branch['branch'].type.to_frame().columns[0],
'type_kind': branch['branch'].kind,
}
# Transformers
for trafo in lv_district.lv_grid.station().transformers():
trafos_idx += 1
trafos_dict[trafos_idx] = {
'LV_grid_id': lv_district.lv_grid.id_db,
's_max_a': trafo.s_max_a,
}
# geographic
district_geo = transform(proj, lv_district.geo_data)
lv_dist_dict[lv_dist_idx].update({'Area': district_geo.area})
lvgd_stats = pd.DataFrame.from_dict(lv_dist_dict, orient='index').set_index('LV_grid_id')
# generate partial dataframes
gen_df = pd.DataFrame.from_dict(lv_gen_dict, orient='index')
load_df = pd.DataFrame.from_dict(lv_load_dict, orient='index')
branch_df = pd.DataFrame.from_dict(branches_dict, orient='index')
trafos_df = pd.DataFrame.from_dict(trafos_dict, orient='index')
# extract desired data
if not gen_df.empty:
# generation by voltage level
lv_generation = gen_df.groupby(['LV_grid_id', 'v_level'])['GenCap'].sum().to_frame().unstack(level=-1)
lv_generation.columns = ['Gen. Cap. v_level ' + str(_[1]) if isinstance(_, tuple) else str(_) for _ in
lv_generation.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_generation], axis=1)
# generation by type/subtype
lv_generation = gen_df.groupby(['LV_grid_id', 'subtype'])['GenCap'].sum().to_frame().unstack(level=-1)
lv_generation.columns = ['Gen. Cap. type ' + str(_[1]) if isinstance(_, tuple) else str(_) for _ in
lv_generation.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_generation], axis=1)
if not load_df.empty:
# number of residential loads
lv_loads = load_df[load_df['load_type'] == 'residential'].groupby(['LV_grid_id'])[
'load_type'].count().to_frame() # .unstack(level=-1)
lv_loads.columns = ['N° of loads residential']
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
# number of agricultural loads
lv_loads = load_df[load_df['load_type'] == 'agricultural'].groupby(['LV_grid_id'])[
'load_type'].count().to_frame() # .unstack(level=-1)
lv_loads.columns = ['N° of loads agricultural']
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
# number of mixed industrial / retail loads
lv_loads = load_df[load_df['load_type'] == 'ind_ret'].groupby(['LV_grid_id'])[
'load_type'].count().to_frame() # .unstack(level=-1)
lv_loads.columns = ['N° of loads mixed industrial/retail']
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
if not branch_df.empty:
# branches by type name
lv_branches = branch_df.groupby(['LV_grid_id', 'type_name'])['length'].sum().to_frame().unstack(level=-1)
lv_branches.columns = ['Length Type ' + _[1] if isinstance(_, tuple) else _ for _ in lv_branches.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
# branches by kind
lv_branches = branch_df[branch_df['type_kind'] == 'line'].groupby(['LV_grid_id'])['length'].sum().to_frame()
lv_branches.columns = ['Length of overhead lines']
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df['type_kind'] == 'cable'].groupby(['LV_grid_id'])['length'].sum().to_frame()
lv_branches.columns = ['Length of underground cables']
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
# N°of branches
lv_branches = branch_df.groupby(['LV_grid_id', 'type_name'])['length'].count().to_frame().unstack(level=-1)
lv_branches.columns = ['N° of branches Type ' + _[1] if isinstance(_, tuple) else _ for _ in
lv_branches.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df['type_kind'] == 'line'].groupby(['LV_grid_id'])['length'].count().to_frame()
lv_branches.columns = ['N° of branches overhead lines']
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df['type_kind'] == 'cable'].groupby(['LV_grid_id'])['length'].count().to_frame()
lv_branches.columns = ['N° of branches underground cables']
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
if not trafos_df.empty:
# N of trafos
lv_trafos = trafos_df.groupby(['LV_grid_id'])['s_max_a'].count().to_frame()
lv_trafos.columns = ['N° of MV/LV Trafos']
lvgd_stats = pd.concat([lvgd_stats, lv_trafos], axis=1)
# Capacity of trafos
lv_trafos = trafos_df.groupby(['LV_grid_id'])['s_max_a'].sum().to_frame()
lv_trafos.columns = ['Accumulated s_max_a in MVLV trafos']
lvgd_stats = pd.concat([lvgd_stats, lv_trafos], axis=1)
lvgd_stats = lvgd_stats.fillna(0)
lvgd_stats = lvgd_stats[sorted(lvgd_stats.columns.tolist())]
return lvgd_stats | LV Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
lvgd_stats : pandas.DataFrame
Dataframe containing several statistical numbers about the LVGD |
def update(self, name, **kwargs):
"""
Update an existing node
"""
# These arguments are allowed
self.allowed('update', kwargs, ['hostname', 'port', 'status',
'storage_hostname', 'volume_type_name', 'size'])
# Remove parameters that are None
kwargs = self.unused(kwargs)
return self.http_post('/nodes/%s' % name, params=kwargs) | Update an existing node
def refetch_for_update(obj):
"""Queries the database for the same object that is passed in, refetching
its contents and runs ``select_for_update()`` to lock the corresponding
row until the next commit.
:param obj:
Object to refetch
:returns:
Refreshed version of the object
"""
return obj.__class__.objects.select_for_update().get(id=obj.id) | Queries the database for the same object that is passed in, refetching
its contents and runs ``select_for_update()`` to lock the corresponding
row until the next commit.
:param obj:
Object to refetch
:returns:
Refreshed version of the object |
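A hypothetical Django usage sketch for the helper above; the Invoice model and balance field are invented for illustration:
from django.db import transaction

def apply_payment(invoice, amount):
    # `invoice` is a previously fetched instance that may be stale by now
    with transaction.atomic():
        locked = refetch_for_update(invoice)   # row stays locked until commit
        locked.balance -= amount
        locked.save()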
def sendToTradepile(self, item_id, safe=True):
"""Send to tradepile (alias for __sendToPile__).
:params item_id: Item id.
:params safe: (optional) False to disable tradepile free space check.
"""
if safe and len(
self.tradepile()) >= self.tradepile_size: # TODO?: optimization (don't parse items in tradepile)
return False
return self.__sendToPile__('trade', item_id=item_id) | Send to tradepile (alias for __sendToPile__).
:params item_id: Item id.
:params safe: (optional) False to disable tradepile free space check. |
def _convert_value(self, item):
"""
Handle different value types for XLS. Item is a cell object.
"""
# Types:
# 0 = empty u''
# 1 = unicode text
# 2 = float (convert to int if possible, then convert to string)
# 3 = date (convert to unambiguous date/time string)
# 4 = boolean (convert to string "0" or "1")
# 5 = error (convert from code to error text)
# 6 = blank u''
# Thanks to Augusto C Men for pointing out a fast solution for XLS/XLSX dates
if item.ctype == 3: # XL_CELL_DATE:
try:
return datetime.datetime(*xlrd.xldate_as_tuple(item.value, self._book.datemode))
except ValueError:
# TODO: make toggable
# Invalid date
return item.value
if item.ctype == 2: # XL_CELL_NUMBER:
if item.value % 1 == 0: # integers
return int(item.value)
else:
return item.value
return item.value | Handle different value types for XLS. Item is a cell object. |
def command_max_run_time(self, event=None):
""" CPU burst max running time - self.runtime_cfg.max_run_time """
try:
max_run_time = self.max_run_time_var.get()
except ValueError:
max_run_time = self.runtime_cfg.max_run_time
self.runtime_cfg.max_run_time = max_run_time
self.max_run_time_var.set(self.runtime_cfg.max_run_time) | CPU burst max running time - self.runtime_cfg.max_run_time |
def add_require(self, require):
""" Add a require object if it does not already exist """
for p in self.requires:
if p.value == require.value:
return
self.requires.append(require) | Add a require object if it does not already exist |
def stop(self):
"""
Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None
"""
super(AggregateDependency, self).stop()
if self.services:
return [
(service, reference)
for reference, service in self.services.items()
]
return None | Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None |
def _build_pools(self):
"""
Slow method, retrieve all the terms from the database.
:return:
"""
if self.level >= Topic:
# words
self.topics_pool = set(self.topic() for i in range(self.pool_size))
if self.level >= Fact:
# sentences
self.facts_pool = set(self.fact() for i in range(self.pool_size))
if self.level >= Theory:
self.theories_pool = set(self.theory() for i in range(self.pool_size))
if self.level >= Text:
self.propositions_pool = set(chain.from_iterable((self.topics_pool, self.facts_pool, self.theories_pool))) | Slow method, retrieve all the terms from the database.
:return: |
def notify(self, correlation_id, args):
"""
Fires this event and notifies all registered listeners.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param args: the parameters to raise this event with.
"""
for listener in self._listeners:
try:
listener.on_event(correlation_id, self, args)
except Exception as ex:
raise InvocationException(
correlation_id,
"EXEC_FAILED",
"Raising event " + self._name + " failed: " + str(ex)
).with_details("event", self._name).wrap(ex) | Fires this event and notifies all registered listeners.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param args: the parameters to raise this event with. |
def get_pk(obj):
""" Return primary key name by model class or instance.
:Parameters:
- `obj`: SQLAlchemy model instance or class.
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class User(Base):
... __tablename__ = 'users'
... id = Column(Integer, primary_key=True)
>>> get_pk(User())
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
>>> get_pk(User)
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
"""
if inspect.isclass(obj):
pk_list = sqlalchemy.inspect(obj).primary_key
else:
pk_list = obj.__mapper__.primary_key
return pk_list | Return primary key name by model class or instance.
:Parameters:
- `obj`: SQLAlchemy model instance or class.
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class User(Base):
... __tablename__ = 'users'
... id = Column(Integer, primary_key=True)
>>> get_pk(User())
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
>>> get_pk(User)
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),) |
def copy(self, cursor, f):
"""
Defines copying JSON from s3 into redshift.
"""
logger.info("Inserting file: %s", f)
cursor.execute("""
COPY %s from '%s'
CREDENTIALS '%s'
JSON AS '%s' %s
%s
;""" % (self.table, f, self._credentials(),
self.jsonpath, self.copy_json_options, self.copy_options)) | Defines copying JSON from s3 into redshift. |
def getVerifiers(self):
"""Returns the list of lab contacts that have verified at least one
analysis from this Analysis Request
"""
contacts = list()
for verifier in self.getVerifiersIDs():
user = api.get_user(verifier)
contact = api.get_user_contact(user, ["LabContact"])
if contact:
contacts.append(contact)
return contacts | Returns the list of lab contacts that have verified at least one
analysis from this Analysis Request |
def from_json(data):
"""
Convert JSON into an in-memory file storage.
Args:
data (str): valid JSON mapping paths/filenames to the base64-encoded file contents.
Returns:
InMemoryFiles: in memory file storage
"""
memfiles = InMemoryFiles()
memfiles.files = json.loads(data)
return memfiles | Convert JSON into an in-memory file storage.
Args:
data (str): valid JSON mapping paths/filenames to the base64-encoded file contents.
Returns:
InMemoryFiles: in memory file storage |
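A small sketch of the expected payload shape, assuming InMemoryFiles.files is simply the decoded dict of path to base64-encoded content:
import base64
import json

payload = json.dumps({"docs/readme.txt": base64.b64encode(b"hello").decode("ascii")})
memfiles = from_json(payload)
print(memfiles.files["docs/readme.txt"])   # the base64 string exactly as stored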
def create_file_service(self):
'''
Creates a FileService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.file.fileservice.FileService`
'''
try:
from azure.storage.file.fileservice import FileService
return FileService(self.account_name, self.account_key,
sas_token=self.sas_token,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception('The package azure-storage-file is required. '
+ 'Please install it using "pip install azure-storage-file"') | Creates a FileService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.file.fileservice.FileService` |
def add(self, elem):
""" Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added
"""
if isinstance(elem, self._allowedTypes):
self._collection.add(elem)
self._collectedTypes.add(type(elem).__name__)
else:
raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem)) | Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added |
def _symbols():
"""(Lazy)load list of all supported symbols (sorted)
Look into `_data()` for all currency symbols, then sort by length and
unicode-ord (A-Z is not as relevant as ֏).
Returns:
List[unicode]: Sorted list of possible currency symbols.
"""
global _SYMBOLS
if _SYMBOLS is None:
tmp = [(s, 'symbol') for s in _data()['symbol'].keys()]
tmp += [(s, 'alpha3') for s in _data()['alpha3'].keys()]
tmp += [(s.name, 'name') for s in _data()['alpha3'].values()]
_SYMBOLS = sorted(
tmp,
key=lambda s: (len(s[0]), ord(s[0][0])),
reverse=True)
return _SYMBOLS | (Lazy)load list of all supported symbols (sorted)
Look into `_data()` for all currency symbols, then sort by length and
unicode-ord (A-Z is not as relevant as ֏).
Returns:
List[unicode]: Sorted list of possible currency symbols. |
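A standalone illustration of the sort key used above: longer symbols come first, and ties are broken by the unicode ordinal of the first character, both descending (the entries are arbitrary examples):
candidates = [("$", "symbol"), ("US$", "symbol"), ("€", "symbol"), ("EUR", "alpha3")]
ordered = sorted(candidates, key=lambda s: (len(s[0]), ord(s[0][0])), reverse=True)
print(ordered)
# [('US$', 'symbol'), ('EUR', 'alpha3'), ('€', 'symbol'), ('$', 'symbol')]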
def joint_img(self, num_iid, pic_path, session, id=None, position=None, is_major=None):
'''taobao.item.joint.img Associate an additional image with an item
- Associates one item image with the item identified by num_iid
- The item referenced by num_iid must belong to the user of the current session
- Associating item images is restricted by seller type and image source: the seller must be a Mall (B-type) seller or have subscribed to the multi-image service, and the image must come from the seller's own image space
- The number of item images is limited: uploaded and associated images together must not exceed the allowed quota'''
request = TOPRequest('taobao.item.joint.img')
request['num_iid'] = num_iid
request['pic_path'] = pic_path
if id!=None:
request['id'] = id
if position!=None:
request['position'] = position
if is_major!=None:
request['is_major'] = is_major
self.create(self.execute(request, session)['item_img'])
return self | taobao.item.joint.img Associate an additional image with an item
- Associates one item image with the item identified by num_iid
- The item referenced by num_iid must belong to the user of the current session
- Associating item images is restricted by seller type and image source: the seller must be a Mall (B-type) seller or have subscribed to the multi-image service, and the image must come from the seller's own image space
- The number of item images is limited: uploaded and associated images together must not exceed the allowed quota
def create_notes_folder(self, title, parentid=""):
"""Create new folder
:param title: The title of the folder to create
:param parentid: The UUID of the parent folder
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/create', post_data={
'title' : title,
'parentid' : parentid
})
return response | Create new folder
:param title: The title of the folder to create
:param parentid: The UUID of the parent folder |
def required(wrapping_functions, patterns_rslt):
'''
USAGE:
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
mypage_patterns = required(
login_required,
[
... url patterns ...
]
)
staff_patterns = required(
staff_member_required,
[
... url patterns ...
]
)
urlpatterns += [
url(r'^staff/', include(staff_patterns, namespace='staff')),
url(r'^mypage/', include(mypage_patterns, namespace='mypage')),
]
'''
if not hasattr(wrapping_functions, '__iter__'):
wrapping_functions = (wrapping_functions,)
return [
_wrap_instance__resolve(wrapping_functions, instance)
for instance in patterns_rslt
] | USAGE:
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
mypage_patterns = required(
login_required,
[
... url patterns ...
]
)
staff_patterns = required(
staff_member_required,
[
... url patterns ...
]
)
urlpatterns += [
url(r'^staff/', include(staff_patterns, namespace='staff')),
url(r'^mypage/', include(mypage_patterns, namespace='mypage')),
] |
def _exec_request(self, service, method=None, path_args=None, data=None,
params=None):
"""Execute request."""
if path_args is None:
path_args = []
req = {
'method': method or 'get',
'url': '/'.join(str(a).strip('/') for a in [
cfg.CONF.tvdb.service_url, service] + path_args),
'data': json.dumps(data) if data else None,
'headers': self.headers,
'params': params,
'verify': cfg.CONF.tvdb.verify_ssl_certs,
}
LOG.debug('executing request (%s %s)', req['method'], req['url'])
resp = self.session.request(**req)
resp.raise_for_status()
return resp.json() if resp.text else resp.text | Execute request. |
def put(self, deviceId):
"""
Puts a new device into the device store
:param deviceId:
:return:
"""
device = request.get_json()
logger.debug("Received /devices/" + deviceId + " - " + str(device))
self._deviceController.accept(deviceId, device)
return None, 200 | Puts a new device into the device store
:param deviceId:
:return: |
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.loc[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() | Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event |
def _wrap_tracebackexception_format(redact: Callable[[str], str]):
"""Monkey-patch TracebackException.format to redact printed lines.
Only the last call will be effective. Consecutive calls will overwrite the
previous monkey patches.
"""
original_format = getattr(TracebackException, '_original', None)
if original_format is None:
original_format = TracebackException.format
setattr(TracebackException, '_original', original_format)
@wraps(original_format)
def tracebackexception_format(self, *, chain=True):
for line in original_format(self, chain=chain):
yield redact(line)
setattr(TracebackException, 'format', tracebackexception_format) | Monkey-patch TracebackException.format to redact printed lines.
Only the last call will be effective. Consecutive calls will overwrite the
previous monkey patches. |
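A minimal sketch of how the monkey patch above might be used to keep a secret out of printed tracebacks; the secret and the redact rule are invented, and it relies on traceback.print_exc going through TracebackException.format (Python 3.5+):
import traceback

SECRET = "hunter2"

def redact(line: str) -> str:
    return line.replace(SECRET, "<redacted>")

_wrap_tracebackexception_format(redact)

try:
    raise ValueError("credential leaked: %s" % SECRET)
except ValueError:
    traceback.print_exc()   # each formatted line passes through redact() first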
def _param_deprecation_warning(schema, deprecated, context):
"""Raises warning about using the 'old' names for some parameters.
The new naming scheme just has two underscores on each end of the word for consistency
"""
for i in deprecated:
if i in schema:
msg = 'When matching {ctx}, parameter {word} is deprecated, use __{word}__ instead'
msg = msg.format(ctx = context, word = i)
warnings.warn(msg, Warning) | Raises warning about using the 'old' names for some parameters.
The new naming scheme just has two underscores on each end of the word for consistency |
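A hypothetical illustration of the renaming this helper warns about; the parameter names 'any' and 'length' are made up for the example:
schema = {"any": [1, 2], "length": 5}          # deprecated spellings
_param_deprecation_warning(schema, deprecated=["any", "length"], context="a list")
# warns: "When matching a list, parameter any is deprecated, use __any__ instead"
# warns: "When matching a list, parameter length is deprecated, use __length__ instead"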
def xpath(request):
"""View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method."""
ident_hash = request.params.get('id')
xpath_string = request.params.get('q')
if not ident_hash or not xpath_string:
exc = httpexceptions.HTTPBadRequest
exc.explanation = 'You must supply both a UUID and an xpath'
raise exc
try:
uuid, version = split_ident_hash(ident_hash)
except IdentHashShortId as e:
uuid = get_uuid(e.id)
version = e.version
except IdentHashMissingVersion as e:
uuid = e.id
version = get_latest_version(e.id)
except IdentHashSyntaxError:
raise httpexceptions.HTTPBadRequest
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(uuid, version, cursor)
resp = request.response
if result['mediaType'] == COLLECTION_MIMETYPE:
matched_route = request.matched_route.name
results = xpath_book(request, uuid, version,
return_json=matched_route.endswith('json'))
if matched_route.endswith('json'):
results = {'results': list(results)}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
else:
resp.body = results
resp.content_type = 'application/xhtml+xml'
else:
results = {'results': list(xpath_page(request, uuid, version))}
resp.body = json.dumps(results)
resp.content_type = 'application/json'
resp.status = "200 OK"
return resp | View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method. |
def telnet_sa_telnet_server_shutdown(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
telnet_sa = ET.SubElement(config, "telnet-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
telnet = ET.SubElement(telnet_sa, "telnet")
server = ET.SubElement(telnet, "server")
shutdown = ET.SubElement(server, "shutdown")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _get_bases(cls, ab):
"""
Start Bases & End Bases
:param ab: at bat object(type:Beautifulsoup)
:return: start base, end base
"""
start_bases, end_bases = [], []
for base in ('1B', '2B', '3B'):
if ab.find('runner', start=base):
start_bases.append(base[0:1])
else:
start_bases.append('_')
if ab.find('runner', end=base):
end_bases.append(base[0:1])
else:
end_bases.append('_')
return ''.join(start_bases), ''.join(end_bases) | Start Bases & End Bases
:param ab: at bat object(type:Beautifulsoup)
:return: start base, end base |
def create_app():
"""Create the standard app for ``fleaker_config`` and register the two
routes required.
"""
app = App.create_app(__name__)
app.configure('.configs.settings')
# yes, I should use blueprints; but I don't really care for such a small
# toy app
@app.route('/config')
def get_config():
"""Get the current configuration of the app."""
return jsonify(app.config)
@app.route('/put_config', methods=['PUT'])
def put_config():
"""Add to the current configuration of the app.
Takes any JSON body and adds all keys to the configs with the provided
values.
"""
data = request.get_json()
for key, val in data.items():
app.config[key] = val
return jsonify({'message': 'Config updated!'})
return app | Create the standard app for ``fleaker_config`` and register the two
routes required. |
def reply_sticker(
self,
sticker: str,
quote: bool = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: Union[
"pyrogram.InlineKeyboardMarkup",
"pyrogram.ReplyKeyboardMarkup",
"pyrogram.ReplyKeyboardRemove",
"pyrogram.ForceReply"
] = None,
progress: callable = None,
progress_args: tuple = ()
) -> "Message":
"""Bound method *reply_sticker* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_sticker(
chat_id=message.chat.id,
sticker=sticker
)
Example:
.. code-block:: python
message.reply_sticker(sticker)
Args:
sticker (``str``):
Sticker to send.
Pass a file_id as string to send a sticker that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a .webp sticker file from the Internet, or
pass a file path as string to upload a new sticker that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_sticker(
chat_id=self.chat.id,
sticker=sticker,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
progress=progress,
progress_args=progress_args
) | Bound method *reply_sticker* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_sticker(
chat_id=message.chat.id,
sticker=sticker
)
Example:
.. code-block:: python
message.reply_sticker(sticker)
Args:
sticker (``str``):
Sticker to send.
Pass a file_id as string to send a sticker that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a .webp sticker file from the Internet, or
pass a file path as string to upload a new sticker that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
def exists_or_mkdir(path, verbose=True):
"""Check a folder by given name, if not exist, create the folder and return False,
if directory exists, return True.
Parameters
----------
path : str
A folder path.
verbose : boolean
If True (default), prints results.
Returns
--------
boolean
True if the folder already exists; otherwise False, and the folder is created.
Examples
--------
>>> tl.files.exists_or_mkdir("checkpoints/train")
"""
if not os.path.exists(path):
if verbose:
logging.info("[*] creates %s ..." % path)
os.makedirs(path)
return False
else:
if verbose:
logging.info("[!] %s exists ..." % path)
return True | Check a folder by given name; if it does not exist, create the folder and return False.
If the directory already exists, return True.
Parameters
----------
path : str
A folder path.
verbose : boolean
If True (default), prints results.
Returns
--------
boolean
True if the folder already exists; otherwise False, and the folder is created.
Examples
--------
>>> tl.files.exists_or_mkdir("checkpoints/train") |
def trace_min_buffer_capacity(self):
"""Retrieves the minimum capacity the trace buffer can be configured with.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The minimum configurable capacity for the trace buffer.
"""
cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to get min trace buffer size.')
return data.value | Retrieves the minimum capacity the trace buffer can be configured with.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The minimum configurable capacity for the trace buffer. |
def can_import(self, file_uris, current_doc=None):
"""
Check that the specified file looks like an image supported by PIL
"""
if len(file_uris) <= 0:
return False
for file_uri in file_uris:
file_uri = self.fs.safe(file_uri)
if not self.check_file_type(file_uri):
return False
return True | Check that the specified file looks like an image supported by PIL |
def add_note(note, **kwargs):
"""
Add a new note
"""
note_i = Note()
note_i.ref_key = note.ref_key
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
note_i.created_by = kwargs.get('user_id')
db.DBSession.add(note_i)
db.DBSession.flush()
return note_i | Add a new note |
def _wait(self, generator, method, timeout=None, *args, **kwargs):
"""Wait until generator is paused before running 'method'."""
if self.debug:
print("waiting for %s to pause" % generator)
original_timeout = timeout
while timeout is None or timeout > 0:
last_time = time.time()
if self._lock.acquire(False): # timeout param was added in 3.2
try:
if self.can_resume():
return method(generator, *args, **kwargs)
elif self.has_terminated():
raise RuntimeError("%s has already terminated" % generator)
finally:
self._lock.release()
if timeout is not None:
timeout -= time.time() - last_time
msg = "%s did not pause after %ss" % (generator, original_timeout)
if self.debug:
print(msg)
raise WaitTimeoutError(msg) | Wait until generator is paused before running 'method'. |
def __run_git(cmd, path=None):
"""internal run git command
:param cmd: git parameters as array
:param path: path where command will be executed
:return: tuple (<line>, <returncode>)
"""
exe = [__get_git_bin()] + cmd
try:
proc = subprocess.Popen(exe, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return None, None
except ValueError:
return None, None
except OSError:
return None, None
out, err = proc.communicate()
if IS_PYTHON3:
out = out.decode("utf-8")
if err:
print("Cmd ('%s') fails: %s" % (' '.join(exe), err))
return None, proc.returncode
return out.strip(), proc.returncode | internal run git command
:param cmd: git parameters as array
:param path: path where command will be executed
:return: tuple (<line>, <returncode>) |
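A minimal usage sketch from inside the same module, assuming git is installed and the working directory is a repository:
out, rc = __run_git(["rev-parse", "--short", "HEAD"], path=".")
if rc == 0:
    print("current commit:", out)
else:
    print("git call failed or not a repository")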
def bulk_modify(self, *filters_or_records, **kwargs):
"""Shortcut to bulk modify records
.. versionadded:: 2.17.0
Args:
*filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters.
Keyword Args:
values (dict): Dictionary of one or more 'field_name': 'new_value' pairs to update
Notes:
Requires Swimlane 2.17+
Examples:
::
# Bulk update records by filter
app.records.bulk_modify(
# Query filters
('Field_1', 'equals', value1),
('Field_2', 'equals', value2),
...
# New values for records
values={
"Field_3": value3,
"Field_4": value4,
...
}
)
# Bulk update records
record1 = app.records.get(tracking_id='APP-1')
record2 = app.records.get(tracking_id='APP-2')
record3 = app.records.get(tracking_id='APP-3')
app.records.bulk_modify(record1, record2, record3, values={"Field_Name": 'new value'})
Returns:
:class:`string`: Bulk Modify Job ID
"""
values = kwargs.pop('values', None)
if kwargs:
raise ValueError('Unexpected arguments: {}'.format(kwargs))
if not values:
raise ValueError('Must provide "values" as keyword argument')
if not isinstance(values, dict):
raise ValueError("values parameter must be dict of {'field_name': 'update_value'} pairs")
_type = validate_filters_or_records(filters_or_records)
request_payload = {}
record_stub = record_factory(self._app)
# build record_id list
if _type is Record:
request_payload['recordIds'] = [record.id for record in filters_or_records]
# build filters
else:
filters = []
for filter_tuples in filters_or_records:
field_name = record_stub.get_field(filter_tuples[0])
filters.append({
"fieldId": field_name.id,
"filterType": filter_tuples[1],
"value": field_name.get_report(filter_tuples[2])
})
request_payload['filters'] = filters
# Ensure all values are wrapped in a bulk modification operation, defaulting to Replace if not provided for
# backwards compatibility
for field_name in list(values.keys()):
modification_operation = values[field_name]
if not isinstance(modification_operation, _BulkModificationOperation):
values[field_name] = Replace(modification_operation)
# build modifications
modifications = []
for field_name, modification_operation in values.items():
# Lookup target field
modification_field = record_stub.get_field(field_name)
if not modification_field.bulk_modify_support:
raise ValueError("Field '{}' of Type '{}', is not supported for bulk modify".format(
field_name,
modification_field.__class__.__name__
))
modifications.append({
"fieldId": {
"value": modification_field.id,
"type": "id"
},
"value": modification_field.get_bulk_modify(modification_operation.value),
"type": modification_operation.type
})
request_payload['modifications'] = modifications
response = self._swimlane.request('put', "app/{0}/record/batch".format(self._app.id), json=request_payload)
# Update records if instances were used to submit bulk modify request after request was successful
if _type is Record:
for record in filters_or_records:
for field_name, modification_operation in six.iteritems(values):
record[field_name] = modification_operation.value
return response.text | Shortcut to bulk modify records
.. versionadded:: 2.17.0
Args:
*filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters.
Keyword Args:
values (dict): Dictionary of one or more 'field_name': 'new_value' pairs to update
Notes:
Requires Swimlane 2.17+
Examples:
::
# Bulk update records by filter
app.records.bulk_modify(
# Query filters
('Field_1', 'equals', value1),
('Field_2', 'equals', value2),
...
# New values for records
values={
"Field_3": value3,
"Field_4": value4,
...
}
)
# Bulk update records
record1 = app.records.get(tracking_id='APP-1')
record2 = app.records.get(tracking_id='APP-2')
record3 = app.records.get(tracking_id='APP-3')
app.records.bulk_modify(record1, record2, record3, values={"Field_Name": 'new value'})
Returns:
:class:`string`: Bulk Modify Job ID |