Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k)
---|---|---|
900 | def set_attrs(obj, attrs):
o = setattr
if hasattr(obj, "__setitem__"):
o = type(obj).__setitem__
[o(obj, k, v) for k, v in attrs.iteritems()] | Applies a collection of attributes C{attrs} to object C{obj} in the most
generic way possible.
@param obj: An instance implementing C{__setattr__}, or C{__setitem__}
@param attrs: A collection implementing the C{iteritems} function
@type attrs: Usually a dict |
901 | def complete(self, match, subject_graph):
size = len(match)
if match.forward[size-1] in subject_graph.neighbors[match.forward[size-2]]:
order = list(range(0, size, 2)) + list(range(1, size-1, 2))[::-1]
ok = True
for i in range(len(order)//2):
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count > 1:
ok = False
break
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2+1)%size]]
)))
if count > 1:
ok = False
break
if ok:
match.ring_vertices = tuple(match.forward[i] for i in order)
return True
paths = list(subject_graph.iter_shortest_paths(
match.forward[size-1],
match.forward[size-2]
))
if (size > 3 and len(paths) == 1 and len(paths[0]) == 3) or \
(size == 3 and len(paths) == 2 and len(paths[0]) == 3):
path = paths[0]
if size == 3 and path[1] == match.forward[0]:
path = paths[1]
match.add_relation(size, path[1])
size += 1
order = list(range(0, size, 2)) + list(range(size-1, 0, -2))
ok = True
for i in range(len(order)//2):
count = len(list(subject_graph.iter_shortest_paths(
match.forward[order[i]],
match.forward[order[(i+size//2)%size]]
)))
if count != 2:
ok = False
break
if ok:
if match.forward[size-1] < match.forward[0]:
ok = False
if not ok:
vertex1 = match.forward[size-1]
del match.forward[size-1]
del match.reverse[vertex1]
size -= 1
else:
match.ring_vertices = tuple(match.forward[i] for i in order)
return ok
return False | Check the completeness of a ring match |
902 | async def eap_options(request: web.Request) -> web.Response:
return web.json_response(EAP_CONFIG_SHAPE, status=200) | Get request returns the available configuration options for WPA-EAP.
Because the options for connecting to WPA-EAP secured networks are quite
complex, to avoid duplicating logic this endpoint returns a json object
describing the structure of arguments and options for the eap_config arg to
/wifi/configure.
The object is shaped like this:
{
options: [ // Supported EAP methods and their options. One of these
// method names must be passed in the eapConfig dict
{
name: str // i.e. TTLS-EAPMSCHAPv2. Should be in the eapType
// key of eapConfig when sent to /configure.
options: [
{
name: str // i.e. "username"
displayName: str // i.e. "Username"
required: bool,
type: str
}
]
}
]
}
The ``type`` keys denote the semantic kind of the argument. Valid types
are:
password: This is some kind of password. It may be a psk for the network,
an Active Directory password, or the passphrase for a private key
string: A generic string; perhaps a username, or a subject-matches
domain name for server validation
file: A file that the user must provide. This should be the id of a
file previously uploaded via POST /wifi/keys.
Although the arguments are described hierarchically, they should be
specified in eap_config as a flat dict. For instance, a /configure
invocation for TTLS/EAP-TLS might look like
```
POST
{
ssid: "my-ssid",
securityType: "wpa-eap",
hidden: false,
eapConfig : {
eapType: "TTLS/EAP-TLS", // One of the method options
identity: "[email protected]", // And then its arguments
anonymousIdentity: "[email protected]",
password: "testing123",
caCert: "12d1f180f081b",
phase2CaCert: "12d1f180f081b",
phase2ClientCert: "009909fd9fa",
phase2PrivateKey: "081009fbcbc"
phase2PrivateKeyPassword: "testing321"
}
}
``` |
903 | def description(pokemon):
r = requests.get( + (pokemon.descriptions.values()[0]))
desc = eval(r.text)[].replace(, )
return desc | Return a description of the given Pokemon. |
904 | def versionok_for_gui():
if sys.hexversion < 0x02060000:
return False
if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
return False
if sys.platform.startswith("java") or sys.platform.startswith():
return False
return True | Return True if running Python is suitable for GUI Event Integration and deeper IPython integration |
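A brief aside on what the hex literals above encode; this sketch only illustrates how `sys.hexversion` is laid out and is not part of the original module.

```python
import sys

# sys.hexversion packs major, minor, micro, release level and serial into a
# single integer, 0xMMmmppRS; e.g. 3.2.0 final is 0x030200f0. The checks above
# therefore read as "at least Python 2.6" and "not a 3.0.x or 3.1.x release".
major = (sys.hexversion >> 24) & 0xFF
minor = (sys.hexversion >> 16) & 0xFF
micro = (sys.hexversion >> 8) & 0xFF
print(major, minor, micro)
```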
905 | def transform(self, X, y=None):
check_is_fitted(self, "categories_")
X = self._check_array(X).copy()
categories = self.categories_
for k, dtype in categories.items():
if _HAS_CTD:
if not isinstance(dtype, pd.api.types.CategoricalDtype):
dtype = pd.api.types.CategoricalDtype(*dtype)
X[k] = X[k].astype(dtype)
else:
cat, ordered = dtype
X[k] = X[k].astype("category").cat.set_categories(cat, ordered)
return X | Transform the columns in ``X`` according to ``self.categories_``.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
X_trn : pandas.DataFrame or dask.DataFrame
Same type as the input. The columns in ``self.categories_`` will
be converted to categorical dtype. |
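A minimal standalone sketch of the same conversion with plain pandas; the column name, categories, and the `categories_`-style mapping are made up for illustration.

```python
import pandas as pd

# Hypothetical fitted state, in the shape categories_ uses when no
# CategoricalDtype support is available: column -> (categories, ordered).
categories_ = {"size": (["S", "M", "L"], True)}

X = pd.DataFrame({"size": ["M", "S", "L", "M"]})
for col, (cats, ordered) in categories_.items():
    dtype = pd.CategoricalDtype(categories=cats, ordered=ordered)
    X[col] = X[col].astype(dtype)

print(X["size"].dtype)  # category dtype with ordered categories S < M < L
```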
906 | def subtract_months(self, months: int) -> datetime:
self.value = self.value - relativedelta(months=months)
return self.value | Subtracts a number of months from the current value |
907 | def take_at_least_n_seconds(time_s):
timeout = PolledTimeout(time_s)
yield
while not timeout.has_expired():
time.sleep(timeout.remaining) | A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
# if Something and SomethingElse took 3 seconds, the with block will sleep
# for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time. |
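As written, the generator above still needs to be registered as a context manager (the surrounding library presumably does this); a simplified, self-contained variant using `contextlib` and `time.time()` in place of the library's `PolledTimeout`:

```python
import contextlib
import time

@contextlib.contextmanager
def take_at_least_n_seconds(time_s):
    start = time.time()
    yield
    remaining = time_s - (time.time() - start)
    if remaining > 0:
        time.sleep(remaining)

with take_at_least_n_seconds(1.0):
    pass  # the body is instant, but the block still takes about one second
```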
908 | def encrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue() | Encrypt `s' for this pubkey. |
def enable(self, cmd="enable", pattern=r"#", re_flags=re.IGNORECASE):
output = ""
if not self.check_enable_mode():
self.write_channel(self.normalize_cmd(cmd))
output += self.read_until_prompt_or_pattern(
pattern=pattern, re_flags=re_flags
)
if not self.check_enable_mode():
msg = (
"Failed to enter enable mode. Please ensure you pass "
"the argument to ConnectHandler."
)
raise ValueError(msg)
return output | Enable mode on MRV uses no password. |
910 | def update_repodata(self, channels=None):
norm_channels = self.conda_get_condarc_channels(channels=channels,
normalize=True)
repodata_urls = self._set_repo_urls_from_channels(norm_channels)
self._check_repos(repodata_urls) | Update repodata from channels or use condarc channels if None. |
911 | def gifs_trending_get(self, api_key, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.gifs_trending_get_with_http_info(api_key, **kwargs)
else:
(data) = self.gifs_trending_get_with_http_info(api_key, **kwargs)
return data | Trending GIFs Endpoint
Fetch GIFs currently trending online. Hand curated by the GIPHY editorial team. The data returned mirrors the GIFs showcased on the <a href = \"http://www.giphy.com\">GIPHY homepage</a>. Returns 25 results by default.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gifs_trending_get(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param int limit: The maximum number of records to return.
:param str rating: Filters results by specified rating.
:param str fmt: Used to indicate the expected response format. Default is Json.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread. |
912 | def get_package_info(self, name):
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
) | Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server. |
913 | def t_offset(self, s):
pos = self.pos
self.add_token(, s)
self.pos = pos + len(s) | r'[+]\d+ |
914 | def move(self, auth, resource, destinationresource, options={"aliases": True}, defer=False):
return self._call('move', auth, [resource, destinationresource, options], defer) | Moves a resource from one parent client to another.
Args:
auth: <cik>
resource: Identified resource to be moved.
destinationresource: resource of client resource is being moved to. |
915 | def in_nested_list(nested_list, obj):
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False | return true if the object is an element of <nested_list> or of a nested
list |
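A quick usage sketch of `in_nested_list`; note that container elements are only recursed into, never compared directly.

```python
nested = [1, [2, [3, 4]], (5, 6)]

print(in_nested_list(nested, 4))       # True: found in the inner list
print(in_nested_list(nested, 6))       # True: tuples are searched as well
print(in_nested_list(nested, 7))       # False
print(in_nested_list(nested, [3, 4]))  # False: sublists themselves never match
```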
916 | def dump(self, validate=True):
if validate:
self.validate()
return bencode(self.convert()) | Create bencoded :attr:`metainfo` (i.e. the content of a torrent file)
:param bool validate: Whether to run :meth:`validate` first
:return: :attr:`metainfo` as bencoded :class:`bytes` |
917 | def parse(response):
if response.status_code == 400:
try:
msg = json.loads(response.content)[]
except (KeyError, ValueError):
msg =
raise ApiError(msg)
return response | check for errors |
918 | def stop_request(self, stop_now=False):
logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)
res = self.con.get(, {: if stop_now else })
return res | Send a stop request to the daemon
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: the daemon response (True) |
919 | def install_var(instance, clear_target, clear_all):
_check_root()
log("Checking frontend library and cache directories",
emitter=)
uid = pwd.getpwnam("hfos").pw_uid
gid = grp.getgrnam("hfos").gr_gid
join = os.path.join
log("Cleaning up: " + item, lvl=warn)
shutil.rmtree(item)
if not os.path.exists(item):
log("Creating path: " + item)
os.mkdir(item)
os.chown(item, uid, gid)
open(logfile, "a").close()
os.chown(logfile, uid, gid)
log("Done: Install Var") | Install required folders in /var |
920 | def train(self, s, path="spelling.txt"):
model = {}
for w in re.findall("[a-z]+", s.lower()):
model[w] = w in model and model[w] + 1 or 1
model = ("%s %s" % (k, v) for k, v in sorted(model.items()))
model = "\n".join(model)
f = open(path, "w")
f.write(model)
f.close() | Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor. |
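A usage sketch, assuming an object exposing the method above (the corpus text and output path are made up); each line of the resulting file is a word followed by its count.

```python
corpus = "the quick brown fox jumps over the lazy dog the end"
speller.train(corpus, path="spelling.txt")  # `speller` is hypothetical

# spelling.txt then contains, one pair per line and sorted by word:
#   brown 1
#   dog 1
#   ...
#   the 3
```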
921 | def unique_append(self, value):
if value not in self:
try:
super(self.__class__, self).append(Uri(value))
except AttributeError as err:
if isinstance(value, MODULE.rdfclass.RdfClassBase):
super(self.__class__, self).append(value)
else:
raise err | function for only appending unique items to a list.
#! consider the possibility of item using this to a set |
922 | def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False,
user_fields=None):
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write,
user_fields=user_fields) | Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `retryable_write` (optional): True if this command is a retryable
write.
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
:Returns:
The result document. |
923 | def usePointsForInterpolation(self,cNrm,mNrm,interpolator):
cFuncNowUnc = interpolator(mNrm,cNrm)
cFuncNow = LowerEnvelope(cFuncNowUnc,self.cFuncNowCnst)
vPfuncNow = MargValueFunc(cFuncNow,self.CRRA)
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow)
return solution_now | Constructs a basic solution for this period, including the consumption
function and marginal value function.
Parameters
----------
cNrm : np.array
(Normalized) consumption points for interpolation.
mNrm : np.array
(Normalized) corresponding market resource points for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m. |
924 | def _validate_authority_uri_abs_path(host, path):
if len(host) > 0 and len(path) > 0 and not path.startswith("/"):
raise ValueError(
"Path in a URL with authority " "should start with a slash () if set"
) | Ensure that path in URL with authority starts with a leading slash.
Raise ValueError if not. |
925 | def _run_command(argv):
command_name, argv = _get_command_and_argv(argv)
_LOGGER.info(, settings.command,
command_name, argv)
subcommand = _get_subcommand(command_name)
func = call.get_callable(subcommand)
doc = usage.format_usage(subcommand.__doc__)
args = _get_parsed_args(command_name, doc, argv)
return call.call(func, args) or 0 | Run the command with the given CLI options and exit.
Command functions are expected to have a __doc__ string that is parseable
by docopt.
Args:
argv: The list of command line arguments supplied for a command. The
first argument is expected to be the name of the command to be run.
Note that this is different than the full arguments parsed by
docopt for the entire program.
Raises:
ValueError: Raised if the user attempted to run an invalid command. |
926 | def post(self, text=None, attachments=None, source_guid=None):
return self.messages.create(text=text, attachments=attachments,
source_guid=source_guid) | Post a direct message to the user.
:param str text: the message content
:param attachments: message attachments
:param str source_guid: a client-side unique ID for the message
:return: the message sent
:rtype: :class:`~groupy.api.messages.DirectMessage` |
927 | def _mmComputeSequenceRepresentationData(self):
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False | Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do need to be recomputed for calls to each of several accessor
methods that use these values. |
928 | def predefinedEntity(name):
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret) | Check whether this name is a predefined entity. |
929 | def get_work_items_batch(self, work_item_get_request, project=None):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
content = self._serialize.body(work_item_get_request, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, self._unwrap_collection(response)) | GetWorkItemsBatch.
Gets work items for a list of work item ids (Maximum 200)
:param :class:`<WorkItemBatchGetRequest> <azure.devops.v5_0.work_item_tracking.models.WorkItemBatchGetRequest>` work_item_get_request:
:param str project: Project ID or project name
:rtype: [WorkItem] |
930 | def _finish(self, update_ops, name_scope):
iter_ = self._get_iter_variable()
beta1_power, beta2_power = self._get_beta_accumulators()
with tf.control_dependencies(update_ops):
with tf.colocate_with(iter_):
def update_beta_op():
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t,
use_locking=self._use_locking)
return tf.group(update_beta1, update_beta2)
maybe_update_beta = tf.cond(
tf.equal(iter_, 0), update_beta_op, tf.no_op)
with tf.control_dependencies([maybe_update_beta]):
update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
use_locking=self._use_locking)
return tf.group(
*update_ops + [update_iter, maybe_update_beta], name=name_scope) | Updates beta_power variables every n batches and incrs counter. |
931 | def grant_client(self, client_id, read=True, write=True):
scopes = []
authorities = []
if write:
for zone in self.service.settings.data[][]:
scopes.append(zone)
authorities.append(zone)
if read:
for zone in self.service.settings.data[][]:
scopes.append(zone)
authorities.append(zone)
self.service.uaa.uaac.update_client_grants(client_id, scope=scopes,
authorities=authorities)
return self.service.uaa.uaac.get_client(client_id) | Grant the given client id all the scopes and authorities
needed to work with the timeseries service. |
932 | def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
row_builder = lambda s, r: list(s.row_values(r))
return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))] | Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored). |
933 | def event_notify(self, event):
if event.name not in self.available_events:
return
message = json.dumps({
'messageType': 'event',
'data': event.as_event_description(),
})
for subscriber in self.available_events[event.name]['subscribers']:
try:
subscriber.write_message(message)
except tornado.websocket.WebSocketClosedError:
pass | Notify all subscribers of an event.
event -- the event that occurred |
934 | def has_event(self, event, cameo_code):
if self.has_cameo_code(cameo_code):
entry = self.mapping.get(cameo_code)
if entry:
return entry[self.event_name[event]]
return False | Test whether there is an "event2" or "event3" entry for the given cameo code
Args:
event:
cameo_code:
Returns: |
935 | def add_call(self, func):
self.trace.append("{} ({}:{})".format(
object_name(func),
inspect.getsourcefile(func),
inspect.getsourcelines(func)[1]))
return self | Add a call to the trace. |
936 | def app_size(self):
"Return the total apparent size, including children."
if self._nodes is None:
return self._app_size
return sum(i.app_size() for i in self._nodes) | Return the total apparent size, including children. |
937 | def to_dict(self):
obj = {
: self.identifier,
: self.name,
: self.description,
: self.data_type.to_dict()
}
if not self.default is None:
obj[] = self.default
return obj | Convert attribute definition into a dictionary.
Returns
-------
dict
Json-like dictionary representation of the attribute definition |
938 | async def _buffer_body(self, reader):
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining)
except asyncio.IncompleteReadError:
raise EOFError() | Buffers the body of the request |
939 | def _get_default_arg(args, defaults, arg_index):
if not defaults:
return DefaultArgSpec(False, None)
args_with_no_defaults = len(args) - len(defaults)
if arg_index < args_with_no_defaults:
return DefaultArgSpec(False, None)
else:
value = defaults[arg_index - args_with_no_defaults]
if (type(value) is str):
value = % value
return DefaultArgSpec(True, value) | Method that determines if an argument has default value or not,
and if yes what is the default value for the argument
:param args: array of arguments, eg: ['first_arg', 'second_arg', 'third_arg']
:param defaults: array of default values, eg: (42, 'something')
:param arg_index: index of the argument in the argument array for which,
this function checks if a default value exists or not. And if default value
exists it would return the default value. Example argument: 1
:return: Tuple of whether there is a default or not, and if yes the default
value, eg: for index 2 i.e. for "second_arg" this function returns (True, 42) |
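The index arithmetic relies on the fact that defaults align with the tail of the argument list; a small `inspect`-based illustration (the sample function is made up):

```python
import inspect

def sample(first_arg, second_arg=42, third_arg="something"):
    pass

spec = inspect.getfullargspec(sample)
print(spec.args)      # ['first_arg', 'second_arg', 'third_arg']
print(spec.defaults)  # (42, 'something')

# Arguments without defaults occupy the front of `args`, so the default for
# args[i] (when it exists) is defaults[i - (len(args) - len(defaults))].
offset = len(spec.args) - len(spec.defaults)
print(spec.defaults[1 - offset])  # 42, the default of 'second_arg'
```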
940 | def draw_simple_elevation(world, sea_level, target):
e = world.layers[].data
c = numpy.empty(e.shape, dtype=numpy.float)
has_ocean = not (sea_level is None or world.layers[].data is None or not world.layers[].data.any())
mask_land = numpy.ma.array(e, mask=world.layers[].data if has_ocean else False)
min_elev_land = mask_land.min()
max_elev_land = mask_land.max()
elev_delta_land = (max_elev_land - min_elev_land) / 11.0
if has_ocean:
land = numpy.logical_not(world.layers[].data)
mask_ocean = numpy.ma.array(e, mask=land)
min_elev_sea = mask_ocean.min()
max_elev_sea = mask_ocean.max()
elev_delta_sea = max_elev_sea - min_elev_sea
c[world.layers[].data] = ((e[world.layers[].data] - min_elev_sea) / elev_delta_sea)
c[land] = ((e[land] - min_elev_land) / elev_delta_land) + 1
else:
c = ((e - min_elev_land) / elev_delta_land) + 1
for y in range(world.height):
for x in range(world.width):
r, g, b = elevation_color(c[y, x], sea_level)
target.set_pixel(x, y, (int(r * 255), int(g * 255),
int(b * 255), 255)) | This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI) |
941 | def _index_param_value(num_samples, v, indices):
if not _is_arraylike(v) or _num_samples(v) != num_samples:
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices) | Private helper function for parameter value indexing.
This determines whether a fit parameter `v` to a SearchCV.fit
should be indexed along with `X` and `y`. Note that this differs
from the scikit-learn version. They pass `X` and compute num_samples.
We pass `num_samples` instead. |
942 | async def request(self, command, payload, retry=3):
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == :
return None
if in result or in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data | Request data. |
943 | def update_firmware(filename,
host=None,
admin_username=None,
admin_password=None):
if os.path.exists(filename):
return _update_firmware(.format(filename),
host=None,
admin_username=None,
admin_password=None)
else:
raise CommandExecutionError(
.format(filename)) | Updates firmware using local firmware file
.. code-block:: bash
salt dell dracr.update_firmware firmware.exe
This executes the following command on your FX2
(using username and password stored in the pillar data)
.. code-block:: bash
racadm update -f firmware.exe -u user -p pass |
944 | def format(sql, args=None):
resolved_vars = {}
code = []
SqlStatement._find_recursive_dependencies(sql, args, code=code,
resolved_vars=resolved_vars)
parts = []
for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):
if escape:
parts.append()
elif placeholder:
variable = placeholder[1:]
try:
value = resolved_vars[variable]
except KeyError as e:
raise Exception( % e.args[0])
if isinstance(value, types.ModuleType):
value = _utils.get_default_query_from_module(value)
if isinstance(value, SqlStatement):
sql = value.format(value._sql, resolved_vars)
value = % sql
elif '_repr_sql_' in dir(value):
value = value._repr_sql_()
elif isinstance(value, basestring):
value = SqlStatement._escape_string(value)
elif isinstance(value, list) or isinstance(value, tuple):
if isinstance(value, tuple):
value = list(value)
expansion =
for v in value:
if len(expansion) > 1:
expansion +=
if isinstance(v, basestring):
expansion += SqlStatement._escape_string(v)
else:
expansion += str(v)
expansion +=
value = expansion
else:
value = str(value)
parts.append(value)
elif literal:
parts.append(literal)
expanded = ''.join(parts)
return expanded | Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure. |
945 | def _by_columns(self, columns):
return columns if self.isstr(columns) else self._backtick_columns(columns) | Allow select.group and select.order accepting string and list |
946 | def read_pipe(pipe_out):
out = b''
while more_data(pipe_out):
out += os.read(pipe_out, 1024)
return out.decode() | Read data on a pipe
Used to capture stdout data produced by libiperf
:param pipe_out: The os pipe_out
:rtype: unicode string |
947 | def _bounds_dist(self, p):
prob = self.problem
lb_dist = (p - prob.variable_bounds[0, :]).min()
ub_dist = (prob.variable_bounds[1, :] - p).min()
if prob.bounds.shape[0] > 0:
const = prob.inequalities.dot(p)
const_lb_dist = (const - prob.bounds[0, :]).min()
const_ub_dist = (prob.bounds[1, :] - const).min()
lb_dist = min(lb_dist, const_lb_dist)
ub_dist = min(ub_dist, const_ub_dist)
return np.array([lb_dist, ub_dist]) | Get the lower and upper bound distances. Negative is bad. |
948 | def update_received_packet(self, received_pkt_size_bytes):
self.update_count(self.RECEIVED_PKT_COUNT)
self.update_count(self.RECEIVED_PKT_SIZE, incr_by=received_pkt_size_bytes) | Update received packet metrics |
949 | def get(self, id, **options):
if not self._item_path:
raise AttributeError( % self._item_name)
target = self._item_path % id
json_data = self._redmine.get(target, **options)
data = self._redmine.unwrap_json(self._item_type, json_data)
data[] = target
return self._objectify(data=data) | Get a single item with the given ID |
950 | def _dy_shapelets(self, shapelets, beta):
num_n = len(shapelets)
dy = np.zeros((num_n+1, num_n+1))
for n1 in range(num_n):
for n2 in range(num_n):
amp = shapelets[n1][n2]
dy[n1][n2+1] -= np.sqrt((n2+1)/2.) * amp
if n2 > 0:
dy[n1][n2-1] += np.sqrt(n2/2.) * amp
return dy/beta | computes the derivative d/dy of the shapelet coeffs
:param shapelets:
:param beta:
:return: |
951 | def disable_share(cookie, tokens, shareid_list):
url = .join([
const.PAN_URL,
,
, tokens[],
])
data = + encoder.encode_uri(json.dumps(shareid_list))
req = net.urlopen(url, headers={
: cookie.header_output(),
: const.CONTENT_FORM_UTF8,
}, data=data.encode())
if req:
content = req.data
return json.loads(content.decode())
else:
return None | Cancel sharing.
shareid_list is a list; each item is a shareid. |
952 | def getAceTypeBit(self, t):
try:
return self.validAceTypes[t][]
except KeyError:
raise CommandExecutionError((
).format(t, .join(self.validAceTypes))) | returns the acetype bit of a text value |
953 | def logs(self, pod=None):
if pod is None:
return {pod.status.pod_ip: self.logs(pod) for pod in self.pods()}
return self.core_api.read_namespaced_pod_log(pod.metadata.name,
pod.metadata.namespace) | Logs from a worker pod
You can get this pod object from the ``pods`` method.
If no pod is specified all pod logs will be returned. On large clusters
this could end up being rather large.
Parameters
----------
pod: kubernetes.client.V1Pod
The pod from which we want to collect logs.
See Also
--------
KubeCluster.pods
Client.get_worker_logs |
954 | def latitude(self):
sd = dm_to_sd(self.lat)
if self.lat_dir == 'N':
return +sd
elif self.lat_dir == 'S':
return -sd
else:
return 0. | Latitude in signed degrees (python float) |
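`dm_to_sd` comes from elsewhere in the library; a rough standalone sketch of such a degrees-and-minutes conversion for NMEA 'ddmm.mmmm' fields (illustrative only, not the library's exact code):

```python
import re

def dm_to_sd(dm):
    """Convert an NMEA 'ddmm.mmmm' field to decimal degrees (unsigned)."""
    if not dm or dm == "0":
        return 0.0
    d, m = re.match(r"^(\d+)(\d\d\.\d+)$", dm).groups()
    return float(d) + float(m) / 60

print(dm_to_sd("4807.038"))  # 48.1173, i.e. 48 degrees 7.038 minutes
```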
955 | def __calculate_bu_dfs_recursively(u, b, dfs_data):
first_time = True
for v in dfs_data[][u]:
if a(v, dfs_data) == u:
if first_time:
b[v] = b[u]
else:
b[v] = D(u, dfs_data)
__calculate_bu_dfs_recursively(v, b, dfs_data)
first_time = False | Calculates the b(u) lookup table with a recursive DFS. |
956 | def _get_unitary(self):
unitary = np.reshape(self._unitary, 2 * [2 ** self._number_of_qubits])
unitary = np.stack((unitary.real, unitary.imag), axis=-1)
unitary[abs(unitary) < self._chop_threshold] = 0.0
return unitary | Return the current unitary in JSON Result spec format |
957 | def estimate_K_knee(self, th=.015, maxK=12):
if self.X.shape[0] < maxK:
maxK = self.X.shape[0]
if maxK < 2:
maxK = 2
K = np.arange(1, maxK)
bics = []
for k in K:
means, labels = self.run_kmeans(self.X, k)
bic = self.compute_bic(self.X, means, labels, K=k,
R=self.X.shape[0])
bics.append(bic)
diff_bics = np.diff(bics)
finalK = K[-1]
if len(bics) == 1:
finalK = 2
else:
bics = np.asarray(bics)
bics -= bics.min()
diff_bics -= diff_bics.min()
for i in range(len(K[:-1])):
if diff_bics[i] < th and K[i] != 1:
finalK = K[i]
break
if self.plot:
plt.subplot(2, 1, 1)
plt.plot(K, bics, label="BIC")
plt.plot(K[:-1], diff_bics, label="BIC diff")
plt.legend(loc=2)
plt.subplot(2, 1, 2)
plt.scatter(self.X[:, 0], self.X[:, 1])
plt.show()
return finalK | Estimates the K using K-means and BIC, by sweeping various K and
choosing the optimal BIC. |
958 | def validate_query(self, using=None, **kwargs):
return self._get_connection(using).indices.validate_query(index=self._name, **kwargs) | Validate a potentially expensive query without executing it.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.validate_query`` unchanged. |
959 | def jprecess(ra, dec, mu_radec=None, parallax=None, rad_vel=None, epoch=None):
if isinstance(ra, ndarray):
ra = array(ra)
dec = array(dec)
else:
ra = array([ra])
dec = array([dec])
n = ra.size
if rad_vel is None:
rad_vel = zeros(n,dtype=float)
else:
if not isinstance(rad_vel, ndarray):
rad_vel = array([rad_vel],dtype=float)
if rad_vel.size != n:
raise Exception()
if (mu_radec is not None):
if (array(mu_radec).size != 2 * n):
raise Exception( + strtrim(n, 2) + )
mu_radec = mu_radec * 1.
if parallax is None:
parallax = zeros(n,dtype=float)
else:
if not isinstance(parallax, ndarray):
parallax = array([parallax],dtype=float)
if epoch is None:
epoch = 1950.0e0
radeg = 180.e0 / pi
sec_to_radian = 1/radeg/3600.
m = array([
array([+0.9999256782e0, +0.0111820610e0, +0.0048579479e0, \
-0.000551e0, +0.238514e0, -0.435623e0 ]),
array([ -0.0111820611e0, +0.9999374784e0, -0.0000271474e0, \
-0.238565e0, -0.002667e0, +0.012254e0 ]),
array([ -0.0048579477e0, -0.0000271765e0, +0.9999881997e0 , \
+0.435739e0, -0.008541e0, +0.002117e0 ]),
array([ +0.00000242395018e0, +0.00000002710663e0, +0.00000001177656e0, \
+0.99994704e0, +0.01118251e0, +0.00485767e0 ]),
array([ -0.00000002710663e0, +0.00000242397878e0, -0.00000000006582e0, \
-0.01118251e0, +0.99995883e0, -0.00002714e0]),
array([ -0.00000001177656e0, -0.00000000006587e0, 0.00000242410173e0, \
-0.00485767e0, -0.00002718e0, 1.00000956e0 ]) ])
a = 1e-6 * array([ -1.62557e0, -0.31919e0, -0.13843e0])
a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0 ])
ra_rad = deg2rad(ra)
dec_rad = deg2rad(dec)
cosra = cos(ra_rad)
sinra = sin(ra_rad)
cosdec = cos(dec_rad)
sindec = sin(dec_rad)
ra_2000 = ra*0.
dec_2000 = dec*0.
for i in range(n):
r0 = array([ cosra[i]*cosdec[i], sinra[i]*cosdec[i], sindec[i] ])
if (mu_radec is None):
mu_a = 0.
mu_d = 0.
else:
mu_a = mu_radec[ i, 0]
mu_d = mu_radec[ i, 1]
r0_dot = array([-mu_a*sinra[i]*cosdec[i] - mu_d*cosra[i]*sindec[i], \
mu_a*cosra[i]*cosdec[i] - mu_d*sinra[i]*sindec[i] , \
mu_d*cosdec[i] ]) + 21.095e0 * rad_vel[i] * parallax[i] * r0
r1 = r0 - a + ((a * r0).sum())*r0
r1_dot = r0_dot - a_dot + (( a * r0).sum())*r0
r_1 = concatenate((r1, r1_dot))
r = transpose(dot(transpose(m),transpose(r_1)))
if mu_radec is None:
rr = r[0:3]
v = r[3:6]
t = ((epoch - 1950.0e0) - 50.00021e0)/100.0
rr1 = rr + sec_to_radian*v*t
x = rr1[0] ; y = rr1[1] ; z = rr1[2]
else:
x = r[0] ; y = r[1] ; z = r[2]
x_dot = r[3] ; y_dot= r[4] ; z_dot = r[5]
r2 = x**2 + y**2 + z**2
rmag = sqrt( r2 )
dec_2000[i] = arcsin(z / rmag)
ra_2000[i] = arctan2(y, x)
if mu_radec is not None:
mu_radec[i, 0] = ( x*y_dot - y*x_dot) / ( x**2 + y**2)
mu_radec[i, 1] = ( z_dot* (x**2 + y**2) - z*(x*x_dot + y*y_dot) ) / \
( r2*sqrt( x**2 + y**2) )
if parallax[i] > 0.:
rad_vel[i] = ( x*x_dot + y*y_dot + z*z_dot )/ (21.095*parallax[i]*rmag)
parallax[i] = parallax[i] / rmag
neg = (ra_2000 < 0)
if neg.any() > 0:
ra_2000[neg] = ra_2000[neg] + 2.0 * pi
ra_2000 = ra_2000*radeg ; dec_2000 = dec_2000*radeg
if ra.size == 1:
ra_2000 = ra_2000[0] ; dec_2000 = dec_2000[0]
return ra_2000, dec_2000 | NAME:
JPRECESS
PURPOSE:
Precess astronomical coordinates from B1950 to J2000
EXPLANATION:
Calculate the mean place of a star at J2000.0 on the FK5 system from the
mean place at B1950.0 on the FK4 system.
Use BPRECESS for the reverse direction J2000 ==> B1950
CALLING SEQUENCE:
jprecess, ra, dec, ra_2000, dec_2000, [ MU_RADEC = , PARALLAX =
RAD_VEL =, EPOCH = ]
INPUTS:
RA,DEC - input B1950 right ascension and declination in *degrees*.
Scalar or vector
OUTPUTS:
RA_2000, DEC_2000 - the corresponding J2000 right ascension and
declination in *degrees*. Same number of elements as RA,DEC
but always double precision.
OPTIONAL INPUT-OUTPUT KEYWORDS
MU_RADEC - 2xN element double precision vector containing the proper
motion in seconds of arc per tropical *century* in right
ascension and declination.
PARALLAX - N_element vector giving stellar parallax (seconds of arc)
RAD_VEL - N_element vector giving radial velocity in km/s
The values of MU_RADEC, PARALLAX, and RADVEL will all be modified
upon output to contain the values of these quantities in the
J2000 system. Values will also be converted to double precision.
The parallax and radial velocity will have a very minor influence on
the J2000 position.
EPOCH - scalar giving epoch of original observations, default 1950.0d
This keyword value is only used if the MU_RADEC keyword is not set.
NOTES:
The algorithm is taken from the Explanatory Supplement to the
Astronomical Almanac 1992, page 184.
Also see Aoki et al (1983), A&A, 128,263
JPRECESS distinguishes between the following two cases:
(1) The proper motion is known and non-zero
(2) the proper motion is unknown or known to be exactly zero (i.e.
extragalactic radio sources). In this case, the algorithm
in Appendix 2 of Aoki et al. (1983) is used to ensure that
the output proper motion is exactly zero. Better precision
can be achieved in this case by inputting the EPOCH of the
original observations.
The error in using the IDL procedure PRECESS for converting between
B1950 and J2000 can be up to 12", mainly in right ascension. If
better accuracy than this is needed then JPRECESS should be used.
EXAMPLE:
The SAO catalogue gives the B1950 position and proper motion for the
star HD 119288. Find the J2000 position.
RA(1950) = 13h 39m 44.526s Dec(1950) = 8d 38' 28.63''
Mu(RA) = -.0259 s/yr Mu(Dec) = -.093 ''/yr
IDL> mu_radec = 100D* [ -15D*.0259, -0.093 ]
IDL> ra = ten(13,39,44.526)*15.D
IDL> dec = ten(8,38,28.63)
IDL> jprecess, ra, dec, ra2000, dec2000, mu_radec = mu_radec
IDL> print, adstring(ra2000, dec2000,2)
===> 13h 42m 12.740s +08d 23' 17.69"
RESTRICTIONS:
"When transferring individual observations, as opposed to catalog mean
place, the safest method is to tranform the observations back to the
epoch of the observation, on the FK4 system (or in the system that was
used to to produce the observed mean place), convert to the FK5 system,
and transform to the the epoch and equinox of J2000.0" -- from the
Explanatory Supplement (1992), p. 180
REVISION HISTORY:
Written, W. Landsman September, 1992
Corrected a couple of typos in M matrix October, 1992
Vectorized, W. Landsman February, 1994
Implement Appendix 2 of Aoki et al. (1983) for case where proper
motion unknown or exactly zero W. Landsman November, 1994
Converted to IDL V5.0 W. Landsman September 1997
Fixed typo in updating proper motion W. Landsman April 1999
Make sure proper motion is floating point W. Landsman December 2000
Use V6.0 notation W. Landsman Mar 2011
Converted to python by A. Drlica-Wagner Feb 2014 |
960 | def equals(val1, val2):
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0 | Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths. |
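A short usage sketch; the standard library's `hmac.compare_digest` offers the same constant-time guarantee and is shown alongside for context (the token strings are made up).

```python
import hmac

supplied = "s3cret-token"
expected = "s3cret-token"

print(equals(supplied, expected))        # True
print(equals(supplied, "s3cret-tokeN"))  # False, but after the same amount of work

# Standard-library equivalent for ASCII str or bytes of the same type:
print(hmac.compare_digest(supplied, expected))  # True
```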
961 | def _remove_code(site):
def handle_error(function, path, excinfo):
click.secho(.format(em=excinfo.message, p=path), err=True, fg=)
if os.path.exists(site.root):
shutil.rmtree(site.root, onerror=handle_error) | Delete project files
@type site: Site |
962 | def _create_ucsm_host_to_service_profile_mapping(self):
ucsm_ips = [ip for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if not ucsm.ucsm_host_list]
for ucsm_ip in ucsm_ips:
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
try:
sp_list = handle.query_classid()
if sp_list is not None:
for sp in sp_list:
if sp.pn_dn:
server_name = handle.query_dn(sp.pn_dn).name
if (server_name and not
sp.oper_src_templ_name):
LOG.debug(
, server_name, ucsm_ip)
key = (ucsm_ip, server_name)
self.ucsm_sp_dict[key] = str(sp.dn)
self.ucsm_host_dict[server_name] = ucsm_ip
except Exception as e:
raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e) | Reads list of Service profiles and finds associated Server. |
963 | def calculate_lvgd_stats(nw):
proj = partial(
pyproj.transform,
pyproj.Proj(init=),
pyproj.Proj(init=))
nw.control_circuit_breakers(mode=)
lv_dist_idx = 0
lv_dist_dict = {}
lv_gen_idx = 0
lv_gen_dict = {}
lv_load_idx = 0
lv_load_dict = {}
branch_idx = 0
branches_dict = {}
trafos_idx = 0
trafos_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
for lv_district in LA.lv_grid_districts():
lv_dist_idx += 1
branches_from_station = len(lv_district.lv_grid.graph_branches_from_node(lv_district.lv_grid.station()))
lv_dist_dict[lv_dist_idx] = {
: mv_district.mv_grid.id_db,
: lv_district.lv_grid.id_db,
: LA.id_db,
: lv_district.population,
: lv_district.peak_load_residential,
: lv_district.peak_load_retail,
: lv_district.peak_load_industrial,
: lv_district.peak_load_agricultural,
: lv_district.sector_count_residential,
: lv_district.sector_count_retail,
: lv_district.sector_count_industrial,
: lv_district.sector_count_agricultural,
: lv_district.sector_consumption_residential,
: lv_district.sector_consumption_retail,
: lv_district.sector_consumption_industrial,
: lv_district.sector_consumption_agricultural,
: branches_from_station,
: LA.is_aggregated,
: LA.is_satellite,
}
for g in lv_district.lv_grid.generators():
lv_gen_idx += 1
subtype = g.subtype
if subtype == None:
subtype =
type = g.type
if type == None:
type =
lv_gen_dict[lv_gen_idx] = {
: lv_district.lv_grid.id_db,
: g.v_level,
: type + + subtype,
: g.capacity,
}
for node in lv_district.lv_grid.graph_nodes_sorted():
if isinstance(node, LVLoadDing0):
lv_load_idx += 1
if in node.consumption:
tipo =
elif in node.consumption:
tipo =
elif in node.consumption:
tipo =
else:
tipo =
lv_load_dict[lv_load_idx] = {
: lv_district.lv_grid.id_db,
: tipo,
}
for branch in lv_district.lv_grid.graph_edges():
branch_idx += 1
branches_dict[branch_idx] = {
: lv_district.lv_grid.id_db,
: branch[].length / 1e3,
: branch[].type.to_frame().columns[0],
: branch[].kind,
}
for trafo in lv_district.lv_grid.station().transformers():
trafos_idx += 1
trafos_dict[trafos_idx] = {
: lv_district.lv_grid.id_db,
: trafo.s_max_a,
}
district_geo = transform(proj, lv_district.geo_data)
lv_dist_dict[lv_dist_idx].update({: district_geo.area})
lvgd_stats = pd.DataFrame.from_dict(lv_dist_dict, orient=).set_index()
gen_df = pd.DataFrame.from_dict(lv_gen_dict, orient=)
load_df = pd.DataFrame.from_dict(lv_load_dict, orient=)
branch_df = pd.DataFrame.from_dict(branches_dict, orient=)
trafos_df = pd.DataFrame.from_dict(trafos_dict, orient=)
if not gen_df.empty:
lv_generation = gen_df.groupby([, ])[].sum().to_frame().unstack(level=-1)
lv_generation.columns = [ + str(_[1]) if isinstance(_, tuple) else str(_) for _ in
lv_generation.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_generation], axis=1)
lv_generation = gen_df.groupby([, ])[].sum().to_frame().unstack(level=-1)
lv_generation.columns = [ + str(_[1]) if isinstance(_, tuple) else str(_) for _ in
lv_generation.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_generation], axis=1)
if not load_df.empty:
lv_loads = load_df[load_df[] == ].groupby([])[
].count().to_frame()
lv_loads.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
lv_loads = load_df[load_df[] == ].groupby([])[
].count().to_frame()
lv_loads.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
lv_loads = load_df[load_df[] == ].groupby([])[
].count().to_frame()
lv_loads.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_loads], axis=1)
if not branch_df.empty:
lv_branches = branch_df.groupby([, ])[].sum().to_frame().unstack(level=-1)
lv_branches.columns = [ + _[1] if isinstance(_, tuple) else _ for _ in lv_branches.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df[] == ].groupby([])[].sum().to_frame()
lv_branches.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df[] == ].groupby([])[].sum().to_frame()
lv_branches.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df.groupby([, ])[].count().to_frame().unstack(level=-1)
lv_branches.columns = [ + _[1] if isinstance(_, tuple) else _ for _ in
lv_branches.columns]
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df[] == ].groupby([])[].count().to_frame()
lv_branches.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
lv_branches = branch_df[branch_df[] == ].groupby([])[].count().to_frame()
lv_branches.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_branches], axis=1)
if not trafos_df.empty:
lv_trafos = trafos_df.groupby([])[].count().to_frame()
lv_trafos.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_trafos], axis=1)
lv_trafos = trafos_df.groupby([])[].sum().to_frame()
lv_trafos.columns = []
lvgd_stats = pd.concat([lvgd_stats, lv_trafos], axis=1)
lvgd_stats = lvgd_stats.fillna(0)
lvgd_stats = lvgd_stats[sorted(lvgd_stats.columns.tolist())]
return lvgd_stats | LV Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
lvgd_stats : pandas.DataFrame
Dataframe containing several statistical numbers about the LVGD |
964 | def update(self, name, **kwargs):
self.allowed(, kwargs, [, , ,
, , ])
kwargs = self.unused(kwargs)
return self.http_post( % name, params=kwargs) | Create a new node |
965 | def refetch_for_update(obj):
return obj.__class__.objects.select_for_update().get(id=obj.id) | Queries the database for the same object that is passed in, refetching
its contents and runs ``select_for_update()`` to lock the corresponding
row until the next commit.
:param obj:
Object to refetch
:returns:
Refreshed version of the object |
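A sketch of typical usage; `select_for_update()` only takes effect inside a transaction, so the call is wrapped in `transaction.atomic()` (the `order` object and its fields are hypothetical).

```python
from django.db import transaction

with transaction.atomic():
    order = refetch_for_update(order)  # row stays locked until the commit
    order.status = "paid"
    order.save()
```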
966 | def sendToTradepile(self, item_id, safe=True):
if safe and len(
self.tradepile()) >= self.tradepile_size: | Send to tradepile (alias for __sendToPile__).
:params item_id: Item id.
:params safe: (optional) False to disable tradepile free space check. |
967 | def _convert_value(self, item):
if item.ctype == 3:
try:
return datetime.datetime(*xlrd.xldate_as_tuple(item.value, self._book.datemode))
except ValueError:
return item.value
if item.ctype == 2:
if item.value % 1 == 0:
return int(item.value)
else:
return item.value
return item.value | Handle different value types for XLS. Item is a cell object. |
968 | def command_max_run_time(self, event=None):
try:
max_run_time = self.max_run_time_var.get()
except ValueError:
max_run_time = self.runtime_cfg.max_run_time
self.runtime_cfg.max_run_time = max_run_time
self.max_run_time_var.set(self.runtime_cfg.max_run_time) | CPU burst max running time - self.runtime_cfg.max_run_time |
969 | def add_require(self, require):
for p in self.requires:
if p.value == require.value:
return
self.requires.append(require) | Add a require object if it does not already exist |
970 | def stop(self):
super(AggregateDependency, self).stop()
if self.services:
return [
(service, reference)
for reference, service in self.services.items()
]
return None | Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None |
971 | def _build_pools(self):
if self.level >= Topic:
self.topics_pool = set(self.topic() for i in range(self.pool_size))
if self.level >= Fact:
self.facts_pool = set(self.fact() for i in range(self.pool_size))
if self.level >= Theory:
self.theories_pool = set(self.theory() for i in range(self.pool_size))
if self.level >= Text:
self.propositions_pool = set(chain.from_iterable((self.topics_pool, self.facts_pool, self.theories_pool))) | Slow method, retrieve all the terms from the database.
:return: |
972 | def notify(self, correlation_id, args):
for listener in self._listeners:
try:
listener.on_event(correlation_id, self, args)
except Exception as ex:
raise InvocationException(
correlation_id,
"EXEC_FAILED",
"Raising event " + self._name + " failed: " + str(ex)
).with_details("event", self._name).wrap(ex) | Fires this event and notifies all registered listeners.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param args: the parameters to raise this event with. |
973 | def get_pk(obj):
if inspect.isclass(obj):
pk_list = sqlalchemy.inspect(obj).primary_key
else:
pk_list = obj.__mapper__.primary_key
return pk_list | Return primary key name by model class or instance.
:Parameters:
- `obj`: SQLAlchemy model instance or class.
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class User(Base):
... __tablename__ = 'users'
... id = Column(Integer, primary_key=True)
>>> get_pk(User())
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
>>> get_pk(User)
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),) |
974 | def copy(self, cursor, f):
logger.info("Inserting file: %s", f)
cursor.execute( % (self.table, f, self._credentials(),
self.jsonpath, self.copy_json_options, self.copy_options)) | Defines copying JSON from s3 into redshift. |
975 | def getVerifiers(self):
contacts = list()
for verifier in self.getVerifiersIDs():
user = api.get_user(verifier)
contact = api.get_user_contact(user, ["LabContact"])
if contact:
contacts.append(contact)
return contacts | Returns the list of lab contacts that have verified at least one
analysis from this Analysis Request |
976 | def from_json(data):
memfiles = InMemoryFiles()
memfiles.files = json.loads(data)
return memfiles | Convert JSON into a in memory file storage.
Args:
data (str): valid JSON with path and filenames and
the base64 encoding of the file content.
Returns:
InMemoryFiles: in memory file storage |
977 | def create_file_service(self):
try:
from azure.storage.file.fileservice import FileService
return FileService(self.account_name, self.account_key,
sas_token=self.sas_token,
endpoint_suffix=self.endpoint_suffix)
except ImportError:
raise Exception(
+ ) | Creates a FileService object with the settings specified in the
CloudStorageAccount.
:return: A service object.
:rtype: :class:`~azure.storage.file.fileservice.FileService` |
978 | def add(self, elem):
if isinstance(elem, self._allowedTypes):
self._collection.add(elem)
self._collectedTypes.add(type(elem).__name__)
else:
raise CollectionTypeError("{} can only contain {}, {} is not allowed.".format(type(self).__name__, self._allowedTypes, elem)) | Adds _elem_ to the collection.
# Parameters
_elem_ : `object`
> The object to be added |
979 | def _symbols():
global _SYMBOLS
if _SYMBOLS is None:
tmp = [(s, ) for s in _data()[].keys()]
tmp += [(s, ) for s in _data()[].keys()]
tmp += [(s.name, ) for s in _data()[].values()]
_SYMBOLS = sorted(
tmp,
key=lambda s: (len(s[0]), ord(s[0][0])),
reverse=True)
return _SYMBOLS | (Lazy)load list of all supported symbols (sorted)
Look into `_data()` for all currency symbols, then sort by length and
unicode-ord (A-Z is not as relevant as ֏).
Returns:
List[unicode]: Sorted list of possible currency symbols. |
980 | def joint_img(self, num_iid, pic_path, session, id=None, position=None, is_major=None):
request = TOPRequest('taobao.item.joint.img')
request['num_iid'] = num_iid
request['pic_path'] = pic_path
if id!=None:
request['id'] = id
if position!=None:
request['position'] = position
if is_major!=None:
request['is_major'] = is_major
self.create(self.execute(request, session)[])
return self | taobao.item.joint.img - attach a sub-image to an item
- Associates an item image with the item specified by num_iid
- The item identified by num_iid must belong to the user of the current session
- Seller identity and image source are restricted: the seller must be a B-seller or subscribe to the multi-image service, and the image must come from the seller's own image space
- The number of item images is limited: uploaded and associated images together must not exceed a fixed quota |
981 | def create_notes_folder(self, title, parentid=""):
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req(, post_data={
: title,
: parentid
})
return response | Create new folder
:param title: The title of the folder to create
:param parentid: The UUID of the parent folder |
982 | def required(wrapping_functions, patterns_rslt):
if not hasattr(wrapping_functions, '__iter__'):
wrapping_functions = (wrapping_functions,)
return [
_wrap_instance__resolve(wrapping_functions, instance)
for instance in patterns_rslt
] | USAGE:
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
mypage_patterns = required(
login_required,
[
... url patterns ...
]
)
staff_patterns = required(
staff_member_required,
[
... url patterns ...
]
)
urlpatterns += [
url(r'^staff/', include(staff_patterns, namespace='staff')),
url(r'^mypage/', include(mypage_patterns, namespace='mypage')),
] |
983 | def _exec_request(self, service, method=None, path_args=None, data=None,
params=None):
if path_args is None:
path_args = []
req = {
: method or ,
: .join(str(a).strip() for a in [
cfg.CONF.tvdb.service_url, service] + path_args),
: json.dumps(data) if data else None,
: self.headers,
: params,
: cfg.CONF.tvdb.verify_ssl_certs,
}
LOG.debug(, req[], req[])
resp = self.session.request(**req)
resp.raise_for_status()
return resp.json() if resp.text else resp.text | Execute request. |
984 | def put(self, deviceId):
device = request.get_json()
logger.debug("Received /devices/" + deviceId + " - " + str(device))
self._deviceController.accept(deviceId, device)
return None, 200 | Puts a new device into the device store
:param deviceId:
:return: |
985 | def predict(self, log2_bayes_factors, reset_index=False):
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() | Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event |
986 | def _wrap_tracebackexception_format(redact: Callable[[str], str]):
original_format = getattr(TracebackException, , None)
if original_format is None:
original_format = TracebackException.format
setattr(TracebackException, , original_format)
@wraps(original_format)
def tracebackexception_format(self, *, chain=True):
for line in original_format(self, chain=chain):
yield redact(line)
setattr(TracebackException, 'format', tracebackexception_format) | Monkey-patch TracebackException.format to redact printed lines.
Only the last call will be effective. Consecutive calls will overwrite the
previous monkey patches. |
987 | def _param_deprecation_warning(schema, deprecated, context):
for i in deprecated:
if i in schema:
msg =
msg = msg.format(ctx = context, word = i)
warnings.warn(msg, Warning) | Raises warning about using the 'old' names for some parameters.
The new naming scheme just has two underscores on each end of the word for consistency |
988 | def xpath(request):
        # Request-parameter names, metadata keys and MIME types below were lost in
        # extraction and are filled in as assumptions.
        ident_hash = request.params.get('id')
        xpath_string = request.params.get('q')
        if not ident_hash or not xpath_string:
            exc = httpexceptions.HTTPBadRequest
            exc.explanation = 'You must supply both an "id" and a "q" parameter'
            raise exc
        try:
            uuid, version = split_ident_hash(ident_hash)
        except IdentHashShortId as e:
            uuid = get_uuid(e.id)
            version = e.version
        except IdentHashMissingVersion as e:
            uuid = e.id
            version = get_latest_version(e.id)
        except IdentHashSyntaxError:
            raise httpexceptions.HTTPBadRequest
        settings = get_current_registry().settings
        with db_connect() as db_connection:
            with db_connection.cursor() as cursor:
                result = get_content_metadata(uuid, version, cursor)
        resp = request.response
        if result['mediaType'] == COLLECTION_MIMETYPE:
            matched_route = request.matched_route.name
            results = xpath_book(request, uuid, version,
                                 return_json=matched_route.endswith('json'))
            if matched_route.endswith('json'):
                results = {'results': list(results)}
                resp.body = json.dumps(results)
                resp.content_type = 'application/json'
            else:
                resp.body = results
                resp.content_type = 'application/xml'
        else:
            results = {'results': list(xpath_page(request, uuid, version))}
            resp.body = json.dumps(results)
            resp.content_type = 'application/json'
        resp.status = "200 OK"
        return resp | View for the route. Determines UUID and version from input request
and determines the type of UUID (collection or module) and executes
the corresponding method. |
989 | def telnet_sa_telnet_server_shutdown(self, **kwargs):
config = ET.Element("config")
telnet_sa = ET.SubElement(config, "telnet-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
telnet = ET.SubElement(telnet_sa, "telnet")
server = ET.SubElement(telnet, "server")
shutdown = ET.SubElement(server, "shutdown")
        callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
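The XML this method hands to the callback can be reproduced standalone with the standard library; the output comment shows the serialized element.

from xml.etree import ElementTree as ET

config = ET.Element("config")
telnet_sa = ET.SubElement(config, "telnet-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
telnet = ET.SubElement(telnet_sa, "telnet")
server = ET.SubElement(telnet, "server")
ET.SubElement(server, "shutdown")
print(ET.tostring(config).decode())
# <config><telnet-sa xmlns="urn:brocade.com:mgmt:brocade-sec-services"><telnet><server><shutdown /></server></telnet></telnet-sa></config>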
990 | def _get_bases(cls, ab):
        start_bases, end_bases = [], []
        # The base labels and tag name were lost in extraction; '1B'/'2B'/'3B' and the
        # 'runner' tag are assumptions based on MLBAM at-bat XML.
        for base in ('1B', '2B', '3B'):
            if ab.find('runner', start=base):
                start_bases.append(base[0:1])
            else:
                start_bases.append('_')
            if ab.find('runner', end=base):
                end_bases.append(base[0:1])
            else:
                end_bases.append('_')
        return ''.join(start_bases), ''.join(end_bases) | Start Bases & End Bases
:param ab: at bat object(type:Beautifulsoup)
:param attribute_name: attribute name
:return: start base, end base |
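A toy check of the reconstruction above with BeautifulSoup; the at-bat fragment is invented and only demonstrates the attribute lookup.

from bs4 import BeautifulSoup

# A runner starting on 1B and ending on 2B.
xml = '<atbat><runner start="1B" end="2B" id="12345"></runner></atbat>'
ab = BeautifulSoup(xml, 'html.parser').find('atbat')
print(ab.find('runner', start='1B') is not None)  # True
print(ab.find('runner', end='2B') is not None)    # True
# With the reconstruction above this at bat yields start='1__', end='_2_'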
991 | def create_app():
app = App.create_app(__name__)
app.configure()
return app | Create the standard app for ``fleaker_config`` and register the two
routes required. |
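A hedged run sketch for local experimentation only; fleaker apps expose the usual Flask development server.

# Run the configured fleaker app locally (development server only).
app = create_app()
app.run(debug=True)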
992 | def reply_sticker(
self,
sticker: str,
quote: bool = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: Union[
"pyrogram.InlineKeyboardMarkup",
"pyrogram.ReplyKeyboardMarkup",
"pyrogram.ReplyKeyboardRemove",
"pyrogram.ForceReply"
] = None,
progress: callable = None,
progress_args: tuple = ()
) -> "Message":
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_sticker(
chat_id=self.chat.id,
sticker=sticker,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
progress=progress,
progress_args=progress_args
) | Bound method *reply_sticker* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_sticker(
chat_id=message.chat.id,
sticker=sticker
)
Example:
.. code-block:: python
message.reply_sticker(sticker)
Args:
sticker (``str``):
Sticker to send.
Pass a file_id as string to send a sticker that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a .webp sticker file from the Internet, or
pass a file path as string to upload a new sticker that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
993 | def exists_or_mkdir(path, verbose=True):
if not os.path.exists(path):
if verbose:
logging.info("[*] creates %s ..." % path)
os.makedirs(path)
return False
else:
if verbose:
logging.info("[!] %s exists ..." % path)
return True | Check a folder by given name, if not exist, create the folder and return False,
if directory exists, return True.
Parameters
----------
path : str
A folder path.
verbose : boolean
If True (default), prints results.
Returns
--------
boolean
True if folder already exist, otherwise, returns False and create the folder.
Examples
--------
>>> tl.files.exists_or_mkdir("checkpoints/train") |
994 | def trace_min_buffer_capacity(self):
cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
        if (res == 1):
            # The original exception message was lost in extraction; any descriptive text works here.
            raise errors.JLinkException('Failed to read minimum trace buffer capacity')
return data.value | Retrieves the minimum capacity the trace buffer can be configured with.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The minimum configurable capacity for the trace buffer. |
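A usage sketch with the pylink package; the probe serial number and target device are hypothetical, and real J-Link hardware plus the SEGGER DLL are required.

import pylink

jlink = pylink.JLink()
jlink.open(serial_no=123456789)   # hypothetical probe serial number
jlink.connect('STM32F407VG')      # hypothetical target MCU
print('min trace buffer:', jlink.trace_min_buffer_capacity(), 'bytes')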
995 | def can_import(self, file_uris, current_doc=None):
if len(file_uris) <= 0:
return False
for file_uri in file_uris:
file_uri = self.fs.safe(file_uri)
if not self.check_file_type(file_uri):
return False
return True | Check that the specified file looks like an image supported by PIL |
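The check_file_type helper is not shown here; a possible MIME-based version is sketched below, and the accepted set is an assumption.

import mimetypes

PIL_IMAGE_MIMES = {'image/png', 'image/jpeg', 'image/bmp', 'image/tiff', 'image/gif'}

def check_file_type(file_uri):
    # Accept anything whose guessed MIME type is an image format Pillow commonly handles.
    mime, _ = mimetypes.guess_type(file_uri)
    return mime in PIL_IMAGE_MIMES

print(check_file_type('file:///tmp/scan_001.png'))  # True
print(check_file_type('file:///tmp/notes.pdf'))     # False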
996 | def add_note(note, **kwargs):
note_i = Note()
note_i.ref_key = note.ref_key
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
        # 'user_id' is an assumed keyword name; the original literal was lost in extraction.
        note_i.created_by = kwargs.get('user_id')
db.DBSession.add(note_i)
db.DBSession.flush()
return note_i | Add a new note |
997 | def _wait(self, generator, method, timeout=None, *args, **kwargs):
if self.debug:
print("waiting for %s to pause" % generator)
original_timeout = timeout
while timeout is None or timeout > 0:
last_time = time.time()
if self._lock.acquire(False):
try:
if self.can_resume():
return method(generator, *args, **kwargs)
elif self.has_terminated():
raise RuntimeError("%s has already terminated" % generator)
finally:
self._lock.release()
if timeout is not None:
timeout -= time.time() - last_time
msg = "%s did not pause after %ss" % (generator, original_timeout)
if self.debug:
print(msg)
raise WaitTimeoutError(msg) | Wait until generator is paused before running 'method'. |
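The locking-with-deadline pattern used above, reduced to a standalone helper; names and the 2-second budget are illustrative.

import time
import threading

def wait_for(predicate, action, lock, timeout=2.0):
    # Keep retrying under the lock until the predicate holds or the budget runs out.
    original_timeout = timeout
    while timeout is None or timeout > 0:
        last_time = time.time()
        if lock.acquire(False):
            try:
                if predicate():
                    return action()
            finally:
                lock.release()
        if timeout is not None:
            timeout -= time.time() - last_time
    raise TimeoutError('condition not met after %ss' % original_timeout)

lock = threading.Lock()
print(wait_for(lambda: True, lambda: 'resumed', lock))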
998 | def __run_git(cmd, path=None):
exe = [__get_git_bin()] + cmd
try:
proc = subprocess.Popen(exe, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return None, None
except ValueError:
return None, None
except OSError:
return None, None
out, err = proc.communicate()
if IS_PYTHON3:
out = out.decode("utf-8")
if err:
print("Cmd () fails: %s" % (.join(exe), err))
return None, proc.returncode
return out.strip(), proc.returncode | internal run git command
:param cmd: git parameters as array
:param path: path where command will be executed
:return: tuple (<line>, <returncode>) |
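Roughly what a call like __run_git(['rev-parse', '--short', 'HEAD']) boils down to, written inline; 'git' is assumed to be on PATH and the working directory to be a repository.

import subprocess

proc = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
                        cwd='.', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
print(out.decode('utf-8').strip(), proc.returncode)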
999 | def bulk_modify(self, *filters_or_records, **kwargs):
        # Keyword names, payload keys and message strings below were lost in extraction
        # and are reconstructed as reasonable assumptions.
        values = kwargs.pop('values', None)
        if kwargs:
            raise ValueError('Unexpected arguments: {}'.format(kwargs))
        if not values:
            raise ValueError('values parameter is required')
        if not isinstance(values, dict):
            raise ValueError("values parameter must be dict of {'field_name': 'new_value'} pairs")
        _type = validate_filters_or_records(filters_or_records)
        request_payload = {}
        record_stub = record_factory(self._app)
        if _type is Record:
            request_payload['recordIds'] = [record.id for record in filters_or_records]
        else:
            filters = []
            for filter_tuples in filters_or_records:
                field_name = record_stub.get_field(filter_tuples[0])
                filters.append({
                    "fieldId": field_name.id,
                    "filterType": filter_tuples[1],
                    "value": field_name.get_report(filter_tuples[2])
                })
            request_payload['filters'] = filters
        # Wrap any raw value in a Replace operation before building the modifications list
        for field_name in list(values.keys()):
            modification_operation = values[field_name]
            if not isinstance(modification_operation, _BulkModificationOperation):
                values[field_name] = Replace(modification_operation)
        modifications = []
        for field_name, modification_operation in values.items():
            modification_field = record_stub.get_field(field_name)
            if not modification_field.bulk_modify_support:
                raise ValueError("Field '{}' of Type '{}' is not supported for bulk modify".format(
                    field_name,
                    modification_field.__class__.__name__
                ))
            modifications.append({
                "fieldId": {
                    "value": modification_field.id,
                    "type": "id"
                },
                "value": modification_field.get_bulk_modify(modification_operation.value),
                "type": modification_operation.type
            })
        request_payload['modifications'] = modifications
        response = self._swimlane.request('put', "app/{0}/record/batch".format(self._app.id), json=request_payload)
        if _type is Record:
            for record in filters_or_records:
                for field_name, modification_operation in six.iteritems(values):
                    record[field_name] = modification_operation.value
        return response.text | Shortcut to bulk modify records
.. versionadded:: 2.17.0
Args:
*filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters.
Keyword Args:
values (dict): Dictionary of one or more 'field_name': 'new_value' pairs to update
Notes:
Requires Swimlane 2.17+
Examples:
::
# Bulk update records by filter
app.records.bulk_modify(
# Query filters
('Field_1', 'equals', value1),
('Field_2', 'equals', value2),
...
# New values for records
values={
"Field_3": value3,
"Field_4": value4,
...
}
)
# Bulk update records
record1 = app.records.get(tracking_id='APP-1')
record2 = app.records.get(tracking_id='APP-2')
record3 = app.records.get(tracking_id='APP-3')
app.records.bulk_modify(record1, record2, record3, values={"Field_Name": 'new value'})
Returns:
:class:`string`: Bulk Modify Job ID |