Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k)
---|---|---|
9,300 | def timeseries_reactive(self):
if self._timeseries_reactive is None:
if self.grid.network.timeseries.load_reactive_power is not None:
self.power_factor =
self.reactive_power_mode =
ts_total = None
for sector in self.consumption.keys():
consumption = self.consumption[sector]
try:
ts = self.grid.network.timeseries.load_reactive_power[
sector].to_frame()
except KeyError:
logger.exception(
"No timeseries for load of type {} "
"given.".format(sector))
raise
ts = ts * consumption
if ts_total is None:
ts_total = ts
else:
ts_total.q += ts.q
return ts_total
else:
return None
else:
return self._timeseries_reactive | Reactive power time series in kvar.
Parameters
-----------
timeseries_reactive : :pandas:`pandas.Series<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing the reactive power time series in kvar. If it is
not set, an attempt is made to retrieve it from the
`load_reactive_power` attribute of the global TimeSeries object.
If that is not possible, None is returned. |
9,301 | def usearch61_smallmem_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
rev=False,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="smallmem_clustered.log",
sizeout=False,
consout_filepath=None):
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_smallmem': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True
}
if sizeorder:
params['--sizeorder'] = True
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
if sizeout:
params['--sizeout'] = True
if consout_filepath:
params['--consout'] = consout_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | Performs usearch61 de novo clustering via cluster_smallmem option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: if True, does not save usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking. |
9,302 | def display(self, image):
assert(image.mode == self.mode)
assert(image.size == self.size)
self._last_image = image.copy()
sz = image.width * image.height * 4
buf = bytearray(sz * 3)
m = self._mapping
for idx, (r, g, b, a) in enumerate(image.getdata()):
offset = sz + m[idx] * 4
brightness = (a >> 4) if a != 0xFF else self._brightness
buf[offset] = (0xE0 | brightness)
buf[offset + 1] = b
buf[offset + 2] = g
buf[offset + 3] = r
self._serial_interface.data(list(buf)) | Takes a 32-bit RGBA :py:mod:`PIL.Image` and dumps it to the daisy-chained
APA102 neopixels. If a pixel is not fully opaque, the alpha channel
value is used to set the brightness of the respective RGB LED. |
9,303 | def dfa_word_acceptance(dfa: dict, word: list) -> bool:
current_state = dfa['initial_state']
for action in word:
if (current_state, action) in dfa['transitions']:
current_state = dfa['transitions'][current_state, action]
else:
return False
if current_state in dfa['accepting_states']:
return True
else:
return False | Checks if a given **word** is accepted by a DFA,
returning True/False.
The word w is accepted by a DFA if DFA has an accepting run
on w. Since A is deterministic,
:math:`w ∈ L(A)` if and only if :math:`ρ(s_0 , w) ∈ F` .
:param dict dfa: input DFA;
:param list word: list of actions ∈ dfa['alphabet'].
:return: *(bool)*, True if the word is accepted, False in the
other case. |
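For illustration, a minimal usage sketch, assuming the dict layout used above (the 'initial_state', 'transitions' and 'accepting_states' keys are inferred from the surrounding code and may differ from the library's exact schema):

dfa = {
    'alphabet': {'a', 'b'},
    'states': {'s0', 's1'},
    'initial_state': 's0',
    'accepting_states': {'s1'},
    'transitions': {
        ('s0', 'a'): 's0', ('s0', 'b'): 's1',
        ('s1', 'a'): 's0', ('s1', 'b'): 's1',
    },
}
print(dfa_word_acceptance(dfa, ['a', 'b']))  # True: the run ends in accepting state s1
print(dfa_word_acceptance(dfa, ['b', 'a']))  # False: the run ends in s0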
9,304 | def start_request(self, headers, *, end_stream=False):
yield from _wait_for_events(self._resumed, self._stream_creatable)
stream_id = self._conn.get_next_available_stream_id()
self._priority.insert_stream(stream_id)
self._priority.block(stream_id)
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
return stream_id | Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
The completion of the call to this method does not mean the request is
successfully delivered - data is only correctly stored in a buffer to
be sent. There's no guarantee it is truly delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication. |
9,305 | def check_database_connected(db):
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
errors = []
try:
with db.engine.connect() as connection:
connection.execute('SELECT 1;')
except DBAPIError as e:
msg = 'DB-API error: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
except SQLAlchemyError as e:
msg = 'Database misconfigured: "{!s}"'.format(e)
errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
return errors | A built-in check to see if connecting to the configured default
database backend succeeds.
It's automatically added to the list of Dockerflow checks if a
:class:`~flask_sqlalchemy.SQLAlchemy` object is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
dockerflow = Dockerflow(app, db=db) |
9,306 | def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i] | S.reverse() -- reverse *IN PLACE* |
9,307 | def _set_cpu_queue_info_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cpu_queue_info_state.cpu_queue_info_state, is_container=, presence=False, yang_name="cpu-queue-info-state", rest_name="cpu-queue-info-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__cpu_queue_info_state = t
if hasattr(self, ):
self._set() | Setter method for cpu_queue_info_state, mapped from YANG variable /cpu_queue_info_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_queue_info_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_queue_info_state() directly.
YANG Description: QoS CPU Queue info |
9,308 | def get_data(self):
result = {}
for field in self.fields:
result[field.name] = self.data.get(field.name)
return result | Returns data from each field. |
9,309 | def button(self):
if self.type != EventType.TABLET_TOOL_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_tool_get_button(
self._handle) | The button that triggered this event.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
int: The button triggering this event. |
9,310 | def list_nodes_full(call=None):
response = _query(, )
ret = {}
for item in response[]:
name = item[]
ret[name] = item
ret[name][] = item[]
ret[name][] = item[][]
ret[name][] = item[][]
ret[name][] = [item[][]]
ret[name][] = []
ret[name][] = item[]
if in item[][]:
ret[name][] =
return ret | List nodes, with all available information
CLI Example:
.. code-block:: bash
salt-cloud -F |
9,311 | def _finalCleanup(self):
for conn in self._connections.values():
conn.releaseConnectionResources()
assert not self._connections | Clean up all of our connections by issuing application-level close and
stop notifications, sending hail-mary final FIN packets (which may not
reach the other end, but nevertheless can be useful) when possible. |
9,312 | def gps_message_arrived(self, m):
gps_week = getattr(m, , None)
gps_timems = getattr(m, , None)
if gps_week is None:
gps_week = getattr(m, , None)
gps_timems = getattr(m, , None)
if gps_week is None:
if getattr(m, , None) is not None:
self.timebase = t
self.counts_since_gps = {} | adjust time base from GPS message |
9,313 | def move_identity(session, identity, uidentity):
if identity.uuid == uidentity.uuid:
return False
old_uidentity = identity.uidentity
identity.uidentity = uidentity
last_modified = datetime.datetime.utcnow()
old_uidentity.last_modified = last_modified
uidentity.last_modified = last_modified
identity.last_modified = last_modified
session.add(uidentity)
session.add(old_uidentity)
return True | Move an identity to a unique identity.
Shifts `identity` to the unique identity given in
`uidentity`. The function returns whether the operation
was executed successfully.
When `uidentity` is the unique identity currently related
to `identity`, this operation does not have any effect and
`False` will be returned as result.
:param session: database session
:param identity: identity to be moved
:param uidentity: unique identity where `identity` will be moved
:return: `True` if the identity was moved; `False` in any other
case |
9,314 | def inquire_property(name, doc=None):
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc) | Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property |
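An illustrative sketch of how such a property factory could be attached to a class; the class and its _inquire stub are hypothetical, not from the library:

class FakeContext:
    def __init__(self):
        self._started = True

    def _inquire(self, **kwargs):
        # Stand-in for the real inquiry call; returns an object carrying the field.
        return type('InquireResult', (), {'lifetime': 3600})()

    lifetime = inquire_property('lifetime', doc='remaining context lifetime')

ctx = FakeContext()
print(ctx.lifetime)  # 3600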
9,315 | def reverse_transform(self, col):
output = pd.DataFrame(index=col.index)
output[self.col_name] = col.apply(self.safe_round, axis=1)
if self.subtype == 'integer':
output[self.col_name] = output[self.col_name].astype(int)
return output | Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame |
9,316 | def delete(name):
with Session() as session:
try:
session.VFolder(name).delete()
print_done()
except Exception as e:
print_error(e)
sys.exit(1) | Delete the given virtual folder. This operation is irreversible!
NAME: Name of a virtual folder. |
9,317 | def autoconf(self):
serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
return (serverInfo is not None) | Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise. |
9,318 | def stream_events(signals: Sequence[Signal], filter: Callable[[T_Event], bool] = None, *,
max_queue_size: int = 0) -> AsyncIterator[T_Event]:
@async_generator
async def streamer():
try:
while True:
event = await queue.get()
if filter is None or filter(event):
await yield_(event)
finally:
cleanup()
def cleanup():
nonlocal queue
if queue is not None:
for signal in signals:
signal.disconnect(queue.put_nowait)
queue = None
assert check_argument_types()
queue = Queue(max_queue_size)
for signal in signals:
signal.connect(queue.put_nowait)
gen = [streamer()]
weakref.finalize(gen[0], cleanup)
return gen.pop() | Return an async generator that yields events from the given signals.
Only events that pass the filter callable (if one has been given) are returned.
If no filter function was given, all events are yielded from the generator.
:param signals: the signals to get events from
:param filter: a callable that takes an event object as an argument and returns ``True`` if
the event should pass, ``False`` if not
:param max_queue_size: maximum size of the queue, after which it will start to drop events |
9,319 | def from_array(filename, data, iline=189,
xline=193,
format=SegySampleFormat.IBM_FLOAT_4_BYTE,
dt=4000,
delrt=0):
dt = int(dt)
delrt = int(delrt)
data = np.asarray(data)
dimensions = len(data.shape)
if dimensions not in range(2, 5):
problem = "Expected 2, 3, or 4 dimensions, {} was given".format(dimensions)
raise ValueError(problem)
spec = segyio.spec()
spec.iline = iline
spec.xline = xline
spec.format = format
spec.sorting = TraceSortingFormat.INLINE_SORTING
if dimensions == 2:
spec.ilines = [1]
spec.xlines = list(range(1, np.size(data,0) + 1))
spec.samples = list(range(np.size(data,1)))
spec.tracecount = np.size(data, 1)
if dimensions == 3:
spec.ilines = list(range(1, np.size(data, 0) + 1))
spec.xlines = list(range(1, np.size(data, 1) + 1))
spec.samples = list(range(np.size(data, 2)))
if dimensions == 4:
spec.ilines = list(range(1, np.size(data, 0) + 1))
spec.xlines = list(range(1, np.size(data, 1) + 1))
spec.offsets = list(range(1, np.size(data, 2)+ 1))
spec.samples = list(range(np.size(data,3)))
samplecount = len(spec.samples)
with segyio.create(filename, spec) as f:
tr = 0
for ilno, il in enumerate(spec.ilines):
for xlno, xl in enumerate(spec.xlines):
for offno, off in enumerate(spec.offsets):
f.header[tr] = {
segyio.su.tracf : tr,
segyio.su.cdpt : tr,
segyio.su.offset : off,
segyio.su.ns : samplecount,
segyio.su.dt : dt,
segyio.su.delrt : delrt,
segyio.su.iline : il,
segyio.su.xline : xl
}
if dimensions == 2: f.trace[tr] = data[tr, :]
if dimensions == 3: f.trace[tr] = data[ilno, xlno, :]
if dimensions == 4: f.trace[tr] = data[ilno, xlno, offno, :]
tr += 1
f.bin.update(
tsort=TraceSortingFormat.INLINE_SORTING,
hdt=dt,
dto=dt
) | Create a new SEGY file from an n-dimensional array. Create a structured
SEGY file with defaulted headers from a 2-, 3- or 4-dimensional array.
ilines, xlines, offsets and samples are inferred from the size of the
array. Please refer to the documentation for functions from_array2D,
from_array3D and from_array4D to see how the arrays are interpreted.
Structure-defining fields in the binary header and in the traceheaders are
set accordingly. Such fields include, but are not limited to iline, xline
and offset. The file also contains a defaulted textual header.
Parameters
----------
filename : string-like
Path to new file
data : 2-,3- or 4-dimensional array-like
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
format : int or segyio.SegySampleFormat
Sample format field in the trace header. Defaults to IBM float 4 byte
dt : int-like
sample interval
delrt : int-like
Notes
-----
.. versionadded:: 1.8
Examples
--------
Create a file from a 3D array, open it and read an iline:
>>> segyio.tools.from_array(path, array3d)
>>> segyio.open(path, mode) as f:
... iline = f.iline[0]
... |
9,320 | def send(msg_type, send_async=False, *args, **kwargs):
message = message_factory(msg_type, *args, **kwargs)
try:
if send_async:
message.send_async()
else:
message.send()
except MessageSendError as e:
err_exit("Unable to send message: ", e) | Constructs a message class and sends the message.
Defaults to sending synchronously. Set send_async=True to send
asynchronously.
Args:
:msg_type: (str) the type of message to send, i.e. 'Email'
:send_async: (bool) default is False, set True to send asynchronously.
:kwargs: (dict) keyword arguments that are required for the
various message types. See docstrings for each type.
i.e. help(messages.Email), help(messages.Twilio), etc.
Example:
>>> kwargs = {
from_: '[email protected]',
to: '[email protected]',
auth: 'yourPassword',
subject: 'Email Subject',
body: 'Your message to send',
attachments: ['filepath1', 'filepath2'],
}
>>> messages.send('email', **kwargs)
Message sent... |
9,321 | def shift_coordinate_grid(self, x_shift, y_shift, pixel_unit=False):
if pixel_unit is True:
ra_shift, dec_shift = self.map_pix2coord(x_shift, y_shift)
else:
ra_shift, dec_shift = x_shift, y_shift
self._ra_at_xy_0 += ra_shift
self._dec_at_xy_0 += dec_shift
self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix(-self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0,
self._Ma2pix) | shifts the coordinate system
:param x_shift: shift in x (or RA)
:param y_shift: shift in y (or DEC)
:param pixel_unit: bool, if True, units of pixels in input, otherwise RA/DEC
:return: updated data class with change in coordinate system |
9,322 | def triangulize(image, tile_size):
if isinstance(image, basestring) or hasattr(image, 'read'):
image = Image.open(image)
assert isinstance(tile_size, int)
image = prep_image(image, tile_size)
logging.info('Prepared image with size %s', image.size)
pix = image.load()
draw = ImageDraw.Draw(image)
for x, y in iter_tiles(image, tile_size):
process_tile(x, y, tile_size, pix, draw, image)
return image | Processes the given image by breaking it down into tiles of the given
size and applying a triangular effect to each tile. Returns the processed
image as a PIL Image object.
The image can be given as anything suitable for passing to `Image.open`
(ie, the path to an image or as a file-like object containing image data).
If tile_size is 0, the tile size will be guessed based on the image
size. It will also be adjusted to be divisible by 2 if it is not already. |
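A hypothetical invocation, assuming the helper functions referenced above (prep_image, iter_tiles, process_tile) are available in the module:

result = triangulize('photo.jpg', tile_size=40)
result.save('photo_triangles.png')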
9,323 | def search(self, index_name, query):
try:
results = self.els_search.search(index=index_name, body=query)
return results
except Exception, error:
error_str = % str(error)
error_str +=
print error_str
raise RuntimeError(error_str) | Search the given index_name with the given ELS query.
Args:
index_name: Name of the Index
query: The string to be searched.
Returns:
List of results.
Raises:
RuntimeError: When the search query fails. |
9,324 | def searchForGroups(self, name, limit=10):
params = {"search": name, "limit": limit}
j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_GROUP, params=params))
return [Group._from_graphql(node) for node in j["viewer"]["groups"]["nodes"]] | Find and get group thread by its name
:param name: Name of the group thread
:param limit: The max. amount of groups to fetch
:return: :class:`models.Group` objects, ordered by relevance
:rtype: list
:raises: FBchatException if request failed |
9,325 | def avail_platforms():
ret = {}
for platform in CMD_MAP:
ret[platform] = True
for cmd in CMD_MAP[platform]:
if not salt.utils.path.which(cmd):
ret[platform] = False
return ret | Return which platforms are available
CLI Example:
.. code-block:: bash
salt myminion genesis.avail_platforms |
9,326 | def add_intercept_term(self, x):
nr_x,nr_f = x.shape
intercept = np.ones([nr_x,1])
x = np.hstack((intercept,x))
return x | Adds a column of ones to estimate the intercept term for
separation boundary |
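The underlying operation is just prepending a column of ones; a small NumPy illustration:

import numpy as np

x = np.array([[2.0, 3.0],
              [4.0, 5.0],
              [6.0, 7.0]])
# hstack a (n, 1) column of ones so a linear model can fit a bias/intercept term
x_ext = np.hstack((np.ones([x.shape[0], 1]), x))
print(x_ext.shape)  # (3, 3); the first column is all ones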
9,327 | def create_collection(self, name, codec_options=None,
read_preference=None, write_concern=None,
read_concern=None, **kwargs):
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, True, codec_options,
read_preference, write_concern,
read_concern, **kwargs) | Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this method. Supported
options vary with MongoDB release. Some examples include:
- "size": desired initial size for the collection (in
bytes). For capped collections this size is the max
size of the collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `name`: the name of the collection to create
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Database` is
used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.4
Added the collation option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
.. versionchanged:: 2.2
Removed deprecated argument: options |
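A typical pymongo usage sketch (the connection string and collection options are illustrative):

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')
db = client.test_database
# Explicit creation is only needed to pass options, e.g. a capped collection.
logs = db.create_collection('logs', capped=True, size=1024 * 1024, max=1000)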
9,328 | def walk_dir(path, args, state):
if args.debug:
sys.stderr.write("Walking %s\n" % path)
for root, _dirs, files in os.walk(path):
if not safe_process_files(root, files, args, state):
return False
if state.should_quit():
return False
return True | Check all files in `path' to see if there are any requests that
we should send out on the bus. |
9,329 | def as_dict(self):
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor} | Bson-serializable dict representation of the VoronoiContainer.
:return: dictionary that is BSON-encodable |
9,330 | def add_model(self, *args, **kwargs):
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_model(self, *args, **kwargs) | Add a new child model to this model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL` |
9,331 | def fill_subparser(subparser):
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012 | Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command. |
9,332 | def parse_ns_headers(ns_headers):
known_attrs = ("expires", "domain", "path", "secure",
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
for ii, param in enumerate(re.split(r";\s*", ns_header)):
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
v = strip_quotes(v)
version_set = True
if k == "expires":
v = http2time(strip_quotes(v))
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result | Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies. |
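For example, a Netscape-style header with an unquoted comma in the expires date and a valueless attribute (the exact epoch value depends on http2time):

hdrs = ['foo=bar; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT; secure']
print(parse_ns_headers(hdrs))
# [[('foo', 'bar'), ('path', '/'), ('expires', <epoch seconds>),
#   ('secure', None), ('version', '0')]]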
9,333 | def rearrange_jupytext_metadata(metadata):
for key in [, ]:
if key in metadata:
metadata[key.replace(, )] = metadata.pop(key)
jupytext_metadata = metadata.pop(, {})
if in metadata:
jupytext_metadata[] = metadata.pop()
if in metadata:
jupytext_metadata[] = {: metadata.pop()}
if in metadata:
jupytext_metadata[] = metadata.pop()
for entry in [, ]:
if entry in metadata:
jupytext_metadata[entry] = metadata.pop(entry)
filters = jupytext_metadata.pop(, {})
if in filters:
jupytext_metadata[] = filters[]
if in filters:
jupytext_metadata[] = filters[]
for filter_level in [, ]:
if filter_level in jupytext_metadata:
jupytext_metadata[filter_level] = metadata_filter_as_string(jupytext_metadata[filter_level])
if jupytext_metadata.get(, {}).get(, ).startswith():
formats = jupytext_metadata.get()
if formats:
jupytext_metadata[] = .join([ + fmt if fmt.rfind() > 0 else fmt
for fmt in formats.split()])
formats = jupytext_metadata.get()
if formats:
jupytext_metadata[] = short_form_multiple_formats(long_form_multiple_formats(formats, metadata))
if jupytext_metadata:
metadata[] = jupytext_metadata | Convert the jupytext_formats metadata entry to jupytext/formats, etc. See #91 |
9,334 | def _lookup_identity_names(self):
id_batch_size = 100
ac = get_auth_client()
self._resolved_map = {}
for i in range(0, len(self.identity_ids), id_batch_size):
chunk = self.identity_ids[i : i + id_batch_size]
resolved_result = ac.get_identities(ids=chunk)
for x in resolved_result["identities"]:
self._resolved_map[x["id"]] = x["username"] | Batch resolve identities to usernames.
Returns a dict mapping IDs to Usernames |
9,335 | def remote_tags(url):
tags = []
remote_git = Git()
for line in remote_git.ls_remote(, , url).split():
hash_ref = line.split()
tags.append(hash_ref[1][10:].replace(,))
return natsorted(tags) | List all available remote tags naturally sorted as version strings
:rtype: list
:param url: Remote URL of the repository
:return: list of available tags |
9,336 | def add_size_info (self):
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE) | Set size of URL content (if any).
Should be overridden in subclasses. |
9,337 | def start(self):
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) | Start scheduling |
9,338 | def should_filter(items):
return (vcfutils.get_paired(items) is not None and
any("damage_filter" in dd.get_tools_on(d) for d in items)) | Check if we should do damage filtering on somatic calling with low frequency events. |
9,339 | def _update_digital_forms(self, **update_props):
digital_forms = wrap_value(update_props[])
xpath_map = self._data_structures[update_props[]]
dist_format_props = (, , , )
dist_format_xroot = self._data_map[]
dist_format_xmap = {prop: xpath_map[prop] for prop in dist_format_props}
dist_formats = []
for digital_form in digital_forms:
dist_format = {prop: digital_form[prop] for prop in dist_format_props}
if digital_form.get():
dist_spec = wrap_value(digital_form.get())
dist_spec.append(_DIGITAL_FORMS_CONTENT_DELIM)
dist_spec.extend(wrap_value(digital_form[]))
dist_format[] = dist_spec
dist_formats.append(dist_format)
update_props[] = dist_formats
dist_formats = update_complex_list(
xpath_root=dist_format_xroot, xpath_map=dist_format_xmap, **update_props
)
trans_option_props = (, , )
trans_option_xroot = self._data_map[]
trans_option_xmap = {prop: self._data_map[ + prop] for prop in trans_option_props}
trans_options = []
for digital_form in digital_forms:
trans_options.append({prop: digital_form[prop] for prop in trans_option_props})
update_props[] = trans_options
trans_options = update_complex_list(
xpath_root=trans_option_xroot, xpath_map=trans_option_xmap, **update_props
)
return {
: dist_formats,
: trans_options
} | Update operation for ISO Digital Forms metadata
:see: gis_metadata.utils._complex_definitions[DIGITAL_FORMS] |
9,340 | def compute_K_numerical(dataframe, settings=None, keep_dir=None):
inversion_code = reda.rcParams.get(, )
if inversion_code == 'crtomo':
import reda.utils.geom_fac_crtomo as geom_fac_crtomo
if keep_dir is not None:
keep_dir = os.path.abspath(keep_dir)
K = geom_fac_crtomo.compute_K(
dataframe, settings, keep_dir)
else:
raise Exception(
.format(
inversion_code
))
return K | Use a finite-element modeling code to infer geometric factors for meshes
with topography or irregular electrode spacings.
Parameters
----------
dataframe : pandas.DataFrame
the data frame that contains the data
settings : dict
The settings required to compute the geometric factors. See examples
down below for more information in the required content.
keep_dir : path
if not None, copy modeling dir here
Returns
-------
K : :class:`numpy.ndarray`
K factors (are also directly written to the dataframe)
Examples
--------
::
settings = {
'rho': 100,
'elem': 'elem.dat',
'elec': 'elec.dat',
'sink_node': '100',
'2D': False,
} |
9,341 | def pdf_extract_text(path, pdfbox_path, pwd=, timeout=120):
if not os.path.isfile(path):
raise IOError()
if not os.path.isfile(pdfbox_path):
raise IOError()
import subprocess
for p in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(p, 'java')):
break
else:
print()
return
try:
if pwd == '':
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', path, path+'.txt']
else:
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', '-password', pwd,
path, path+'.txt']
subprocess.check_call(cmd, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=timeout)
except subprocess.TimeoutExpired as e:
print(.format(timeout/60))
except subprocess.CalledProcessError as e:
print() | Utility to use PDFBox from pdfbox.apache.org to extract Text from a PDF
Parameters
----------
path : str
Path to source pdf-file
pdfbox_path : str
Path to pdfbox-app-x.y.z.jar
pwd : str, optional
Password for protected pdf files
timeout : int, optional
Seconds to wait for a result before raising an exception (defaults to 120).
Returns
-------
file
Writes the result as the name of the source file and appends '.txt'.
Notes
-----
- Requires pdfbox-app-x.y.z.jar in a recent version (see http://pdfbox.apache.org).
- Requires Java (JDK) 1.5 or newer (see http://www.oracle.com/technetwork/java/javase/downloads/index.html).
- Requires java to be on the PATH. |
9,342 | def asarray(self, out=None, squeeze=True, lock=None, reopen=True,
maxsize=None, maxworkers=None, validate=True):
fh = self.parent.filehandle
byteorder = self.parent.tiff.byteorder
offsets, bytecounts = self._offsetscounts
self_ = self
self = self.keyframe
if not self._shape or product(self._shape) == 0:
return None
tags = self.tags
if validate or validate is None:
if maxsize is None:
maxsize = 2**44
if maxsize and product(self._shape) > maxsize:
raise ValueError( % str(self._shape))
if self.dtype is None:
raise ValueError( % (
self.sampleformat, self.bitspersample))
if self.compression not in TIFF.DECOMPESSORS:
raise ValueError(
% self.compression.name)
if in tags:
tag = tags[]
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError(
% tag.value)
if self.is_subsampled and (self.compression not in (6, 7) or
self.planarconfig == 2):
raise NotImplementedError()
if validate is None:
return None
lock = fh.lock if lock is None else lock
with lock:
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError()
dtype = self._dtype
shape = self._shape
imagewidth = self.imagewidth
imagelength = self.imagelength
imagedepth = self.imagedepth
bitspersample = self.bitspersample
typecode = byteorder + dtype.char
lsb2msb = self.fillorder == 2
istiled = self.is_tiled
if istiled:
tilewidth = self.tilewidth
tilelength = self.tilelength
tiledepth = self.tiledepth
tw = (imagewidth + tilewidth - 1) // tilewidth
tl = (imagelength + tilelength - 1) // tilelength
td = (imagedepth + tiledepth - 1) // tiledepth
tiledshape = (td, tl, tw)
tileshape = (tiledepth, tilelength, tilewidth, shape[-1])
runlen = tilewidth
else:
runlen = imagewidth
if self.planarconfig == 1:
runlen *= self.samplesperpixel
if isinstance(out, str) and out == and self.is_memmappable:
with lock:
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
if out is not None:
out = create_output(out, shape, dtype)
with lock:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape), out=out)
if lsb2msb:
bitorder_decode(result, out=result)
else:
result = create_output(out, shape, dtype)
decompress = TIFF.DECOMPESSORS[self.compression]
if self.compression in (6, 7):
colorspace = None
outcolorspace = None
jpegtables = None
if lsb2msb:
log.warning()
lsb2msb = False
if in tags:
jpegtables = self_._gettags({347}, lock=lock)[0][1].value
if in tags:
pass
elif self.photometric == 6:
outcolorspace =
elif self.photometric == 2:
if self.planarconfig == 2:
raise NotImplementedError(
)
colorspace = outcolorspace =
else:
outcolorspace = TIFF.PHOTOMETRIC(self.photometric).name
if istiled:
heightwidth = tilelength, tilewidth
else:
heightwidth = imagelength, imagewidth
def decompress(data, bitspersample=bitspersample,
jpegtables=jpegtables, colorspace=colorspace,
outcolorspace=outcolorspace, shape=heightwidth,
out=None, _decompress=decompress):
return _decompress(data, bitspersample, jpegtables,
colorspace, outcolorspace, shape, out)
def unpack(data):
return data.reshape(-1)
elif bitspersample in (8, 16, 32, 64, 128):
if (bitspersample * runlen) % 8:
raise ValueError()
if self.predictor == 3:
typecode = dtype.char
def unpack(data, typecode=typecode, out=None):
try:
return numpy.frombuffer(data, typecode)
except ValueError:
bps = bitspersample // 8
xlen = (len(data) // bps) * bps
return numpy.frombuffer(data[:xlen], typecode)
elif isinstance(bitspersample, tuple):
def unpack(data, out=None):
return unpack_rgb(data, typecode, bitspersample)
else:
def unpack(data, out=None):
return packints_decode(data, typecode, bitspersample,
runlen)
if istiled:
unpredict = TIFF.UNPREDICTORS[self.predictor]
def decode(tile, tileindex):
return tile_decode(tile, tileindex, tileshape, tiledshape,
lsb2msb, decompress, unpack, unpredict,
result[0])
tileiter = buffered_read(fh, lock, offsets, bytecounts)
if maxworkers is None:
maxworkers = 0 if self.compression > 1 else 1
if maxworkers == 0:
import multiprocessing
maxworkers = multiprocessing.cpu_count() // 2
if maxworkers < 2:
for i, tile in enumerate(tileiter):
decode(tile, i)
else:
decode(next(tileiter), 0)
with ThreadPoolExecutor(maxworkers) as executor:
executor.map(decode, tileiter, range(1, len(offsets)))
else:
stripsize = self.rowsperstrip * self.imagewidth
if self.planarconfig == 1:
stripsize *= self.samplesperpixel
outsize = stripsize * self.dtype.itemsize
result = result.reshape(-1)
index = 0
for strip in buffered_read(fh, lock, offsets, bytecounts):
if lsb2msb:
strip = bitorder_decode(strip, out=strip)
strip = decompress(strip, out=outsize)
strip = unpack(strip)
size = min(result.size, strip.size, stripsize,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor != 1 and not (istiled and not self.is_contiguous):
unpredict = TIFF.UNPREDICTORS[self.predictor]
result = unpredict(result, axis=-2, out=result)
if squeeze:
try:
result.shape = self.shape
except ValueError:
log.warning(,
result.shape, self.shape)
if closed:
fh.close()
return result | Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
Parameters
----------
out : numpy.ndarray, str, or file-like object
Buffer where image data will be saved.
If None (default), a new array will be created.
If numpy.ndarray, a writable array of compatible dtype and shape.
If 'memmap', directly memory-map the image data in the TIFF file
if possible; else create a memory-mapped array in a temporary file.
If str or open file, the file name or file object used to
create a memory-map to an array stored in a binary file on disk.
squeeze : bool
If True (default), all length-1 dimensions (except X and Y) are
squeezed out from the array.
If False, the shape of the returned array might be different from
the page.shape.
lock : {RLock, NullContext}
A reentrant lock used to synchronize reads from file.
If None (default), the lock of the parent's filehandle is used.
reopen : bool
If True (default) and the parent file handle is closed, the file
is temporarily re-opened and closed if no exception occurs.
maxsize: int
Maximum size of data before a ValueError is raised.
Can be used to catch DOS. Default: 16 TB.
maxworkers : int or None
Maximum number of threads to concurrently decode tile data.
If None (default), up to half the CPU cores are used for
compressed tiles.
See remarks in TiffFile.asarray.
validate : bool
If True (default), validate various parameters.
If None, only validate parameters and return None.
Returns
-------
numpy.ndarray
Numpy array of decompressed, depredicted, and unpacked image data
read from Strip/Tile Offsets/ByteCounts, formatted according to
shape and dtype metadata found in tags and parameters.
Photometric conversion, pre-multiplied alpha, orientation, and
colorimetry corrections are not applied. Specifically, CMYK images
are not converted to RGB, MinIsWhite images are not inverted,
and color palettes are not applied. |
9,343 | def commissionerUnregister(self):
print '%s call commissionerUnregister' % self.port
cmd = 'commissioner stop'
print cmd
return self.__sendCommand(cmd)[0] == 'Done' | stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner |
9,344 | def _get(self, url,
param_dict={},
securityHandler=None,
additional_headers=[],
handlers=[],
proxy_url=None,
proxy_port=None,
compress=True,
custom_handlers=[],
out_folder=None,
file_name=None):
self._last_method = "GET"
CHUNK = 4056
param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
headers = [] + additional_headers
if compress:
headers.append((, ))
else:
headers.append((, ))
headers.append((, self.useragent))
if len(param_dict.keys()) == 0:
param_dict = None
if handlers is None:
handlers = []
if handler is not None:
handlers.append(handler)
handlers.append(RedirectHandler())
if cj is not None:
handlers.append(request.HTTPCookieProcessor(cj))
if proxy_url is not None:
if proxy_port is None:
proxy_port = 80
proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
"https":"https://%s:%s" % (proxy_url, proxy_port)}
proxy_support = request.ProxyHandler(proxies)
handlers.append(proxy_support)
opener = request.build_opener(*handlers)
opener.addheaders = headers
if param_dict is None:
resp = opener.open(url, data=param_dict)
elif len(str(urlencode(param_dict))) + len(url) >= 1999:
resp = opener.open(url, data=urlencode(param_dict))
else:
format_url = url + "?%s" % urlencode(param_dict)
resp = opener.open(fullurl=format_url)
self._last_code = resp.getcode()
self._last_url = resp.geturl()
maintype = self._mainType(resp)
contentDisposition = resp.headers.get()
contentEncoding = resp.headers.get()
contentType = resp.headers.get().split()[0].lower()
contentLength = resp.headers.get()
if maintype.lower() in (,
) or \
contentType == or \
(contentDisposition is not None and \
contentDisposition.lower().find() > -1):
fname = self._get_file_name(
contentDisposition=contentDisposition,
url=url)
if out_folder is None:
out_folder = tempfile.gettempdir()
if contentLength is not None:
max_length = int(contentLength)
if max_length < CHUNK:
CHUNK = max_length
file_name = os.path.join(out_folder, fname)
with open(file_name, ) as writer:
for data in self._chunk(response=resp,
size=CHUNK):
writer.write(data)
writer.flush()
writer.flush()
del writer
return file_name
else:
read = ""
for data in self._chunk(response=resp,
size=CHUNK):
if self.PY3 == True:
read += data.decode()
else:
read += data
del data
try:
results = json.loads(read)
if in results:
if in results[]:
if results[][] == :
if url.startswith():
url = url.replace(, )
return self._get(url,
param_dict,
securityHandler,
additional_headers,
handlers,
proxy_url,
proxy_port,
compress,
custom_handlers,
out_folder,
file_name)
return results
except:
return read | Performs a GET operation
Inputs:
Output:
returns dictionary, string or None |
9,345 | def set_default_init_cli_cmds(self):
init_cli_cmds = []
init_cli_cmds.append("set --retcode true")
init_cli_cmds.append("echo off")
init_cli_cmds.append("set --vt100 off")
init_cli_cmds.append('set dut "' + self.name + '"')
init_cli_cmds.append(['set testcase "' + self.testcase + '"', True])
return init_cli_cmds | Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands. |
9,346 | def _check_callback(callback):
if inspect.isclass(callback):
callback_object = callback()
if not callable(callback_object):
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
elif callable(callback):
callback_object = callback
else:
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
return callback_object | Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback. |
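Both accepted shapes in one illustrative snippet: a plain function is returned as-is, while a class implementing __call__ is instantiated first:

def on_message(message):
    print('function got', message)

class Handler:
    def __call__(self, message):
        print('instance got', message)

cb1 = _check_callback(on_message)  # the same function object comes back
cb2 = _check_callback(Handler)     # a Handler() instance
cb2('hello')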
9,347 | def loop(self, max_seconds=None):
loop_started = datetime.datetime.now()
self._is_running = True
while self._is_running:
self.process_error_queue(self.q_error)
if max_seconds is not None:
if (datetime.datetime.now() - loop_started).total_seconds() > max_seconds:
break
for subprocess in self._subprocesses:
if not subprocess.is_alive():
subprocess.start()
self.process_io_queue(self.q_stdout, sys.stdout)
self.process_io_queue(self.q_stderr, sys.stderr) | Main loop for the process. This runs continuously until it is stopped or until `max_seconds` has elapsed. |
9,348 | def ReadVarString(self, max=sys.maxsize):
length = self.ReadVarInt(max)
return self.unpack(str(length) + 's', length) | Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
bytes: |
9,349 | def make_posthook(self):
print(id(self.posthook), self.posthook)
print(id(super(self.__class__, self).posthook), super(self.__class__, self).posthook)
import ipdb;ipdb.set_trace()
if self.posthook:
os.chdir(self.project_name)
self.posthook() | Run the post hook into the project directory. |
9,350 | def en004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} needs to be of type float for field `en004`'.format(value))
self._en004 = value | Corresponds to IDD Field `en004`
Enthalpy corresponding to 0.4% annual cumulative frequency of occurrence
(and mean coincident dry-bulb temperature)
Args:
value (float): value for IDD Field `en004`
Unit: kJ/kg
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
9,351 | def console(self):
while True:
if six.PY2:
code = raw_input()
else:
code = input()
try:
print(self.eval(code))
except KeyboardInterrupt:
break
except Exception as e:
import traceback
if DEBUG:
sys.stderr.write(traceback.format_exc())
else:
sys.stderr.write('Error: ' + str(e) + '\n')
time.sleep(0.01) | starts to interact (starts interactive console) Something like code.InteractiveConsole |
9,352 | def set_attributes(self, cell_renderer, **attributes):
Gtk.CellLayout.clear_attributes(self, cell_renderer)
for (name, value) in attributes.items():
Gtk.CellLayout.add_attribute(self, cell_renderer, name, value) | :param cell_renderer: the :obj:`Gtk.CellRenderer` we're setting the attributes of
:type cell_renderer: :obj:`Gtk.CellRenderer`
{{ docs }} |
9,353 | def creep_kill(self, target, timestamp):
self.creep_kill_types[target] += 1
matched = False
for k, v in self.creep_types.iteritems():
if target.startswith(k):
matched = True
setattr(self, v, getattr(self, v) + 1)
break
if not matched:
print(.format(target)) | A creep was tragically killed. Need to split this into radiant/dire
and neutrals |
9,354 | def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None:
self.__state = state
self.__sub_area = sub_area
hardware_source_id = self.__hardware_source.hardware_source_id
channel_index = self.index
channel_id = self.channel_id
channel_name = self.name
metadata = copy.deepcopy(data_and_metadata.metadata)
hardware_source_metadata = dict()
hardware_source_metadata["hardware_source_id"] = hardware_source_id
hardware_source_metadata["channel_index"] = channel_index
if channel_id is not None:
hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id])
hardware_source_metadata["channel_id"] = channel_id
else:
hardware_source_metadata["reference_key"] = hardware_source_id
if channel_name is not None:
hardware_source_metadata["channel_name"] = channel_name
if view_id:
hardware_source_metadata["view_id"] = view_id
metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata)
data = data_and_metadata.data
master_data = self.__data_and_metadata.data if self.__data_and_metadata else None
data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype
if data_matches and sub_area is not None:
top = sub_area[0][0]
bottom = sub_area[0][0] + sub_area[1][0]
left = sub_area[0][1]
right = sub_area[0][1] + sub_area[1][1]
if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]:
master_data = numpy.copy(master_data)
master_data[top:bottom, left:right] = data[top:bottom, left:right]
else:
master_data = numpy.copy(data)
else:
master_data = data
data_descriptor = data_and_metadata.data_descriptor
intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None
dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None
timestamp = data_and_metadata.timestamp
new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor)
self.__data_and_metadata = new_extended_data
self.data_channel_updated_event.fire(new_extended_data)
self.is_dirty = True | Called from hardware source when new data arrives. |
9,355 | def set_chat_photo(self, chat_id, photo):
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
assert_type_or_raise(photo, InputFile, parameter_name="photo")
result = self.do("setChatPhoto", chat_id=chat_id, photo=photo)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
try:
return from_array_list(bool, result, list_level=0, is_builtin=True)
except TgApiParseException:
logger.debug("Failed parsing as primitive bool", exc_info=True)
raise TgApiParseException("Could not parse result.")
return result | Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting is off in the target group.
https://core.telegram.org/bots/api#setchatphoto
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns True on success
:rtype: bool |
9,356 | def resolve_variables(self, provided_variables):
self.resolved_variables = {}
defined_variables = self.defined_variables()
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, var_def in defined_variables.items():
value = resolve_variable(
var_name,
var_def,
variable_dict.get(var_name),
self.name
)
self.resolved_variables[var_name] = value | Resolve the values of the blueprint variables.
This will resolve the values of the `VARIABLES` with values from the
env file, the config, and any lookups resolved.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables |
9,357 | def prt_results(self, goea_results):
if self.args.outfile is None:
self._prt_results(goea_results)
else:
outfiles = self.args.outfile.split(",")
grpwr = self.prepgrp.get_objgrpwr(goea_results) if self.prepgrp else None
if grpwr is None:
self.prt_outfiles_flat(goea_results, outfiles)
else:
grpwr.prt_outfiles_grouped(outfiles) | Print GOEA results to the screen or to a file. |
9,358 | def get_serializer(self, *args, **kwargs):
action = kwargs.pop('action', None)
serializer_class = self.get_serializer_class(action)
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs) | Returns the serializer instance that should be used to the
given action.
If any action was given, returns the serializer_class |
9,359 | def iter_directory(directory):
for path, dir, files in os.walk(directory):
for f in files:
filepath = os.path.join(path, f)
key = os.path.relpath(filepath, directory)
yield (filepath, key) | Given a directory, yield all files recursively as a two-tuple (filepath, s3key) |
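A small usage sketch; the directory path is illustrative:

for filepath, key in iter_directory('/tmp/site'):
    # e.g. ('/tmp/site/css/main.css', 'css/main.css')
    print(filepath, '->', key)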
9,360 | def pop(self):
if self._count == 0:
raise StreamEmptyError("Pop called on buffered stream walker without any data", selector=self.selector)
while True:
curr = self.engine.get(self.storage_type, self.offset)
self.offset += 1
stream = DataStream.FromEncoded(curr.stream)
if self.matches(stream):
self._count -= 1
return curr | Pop a reading off of this stream and return it. |
9,361 | def get_tokens_by_code(self, code, state):
params = dict(oxd_id=self.oxd_id, code=code, state=state)
logger.debug("Sending command `get_tokens_by_code` with params %s",
params)
response = self.msgr.request("get_tokens_by_code", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data'] | Function to get access code for getting the user details from the
OP. It is called after the user authorizes by visiting the auth URL.
Parameters:
* **code (string):** code, parse from the callback URL querystring
* **state (string):** state value parsed from the callback URL
Returns:
**dict:** The tokens object with the following data structure.
Example response::
{
"access_token": "<token string>",
"expires_in": 3600,
"refresh_token": "<token string>",
"id_token": "<token string>",
"id_token_claims":
{
"iss": "https://server.example.com",
"sub": "24400320",
"aud": "s6BhdRkqt3",
"nonce": "n-0S6_WzA2Mj",
"exp": 1311281970,
"iat": 1311280970,
"at_hash": "MTIzNDU2Nzg5MDEyMzQ1Ng"
}
}
Raises:
**OxdServerError:** If oxd server throws an error OR if the params code
and scopes are of improper data type. |
9,362 | def _merge_mappings(*args):
dct = {}
for arg in args:
if isinstance(arg, dict):
merge = arg
else:
assert isinstance(arg, tuple)
keys, value = arg
merge = dict(zip(keys, [value]*len(keys)))
dct.update(merge)
return dct | Merges a sequence of dictionaries and/or tuples into a single dictionary.
If a given argument is a tuple, it must have two elements, the first of which is a sequence of keys and the second
of which is a single value, which will be mapped to from each of the keys in the sequence. |
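A short illustration of the two accepted argument shapes and the left-to-right update order:

merged = _merge_mappings(
    {'a': 1},
    (('b', 'c'), 0),   # a (keys, value) tuple: both 'b' and 'c' map to 0
    {'c': 99},         # later arguments overwrite earlier keys
)
print(merged)  # {'a': 1, 'b': 0, 'c': 99}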
9,363 | def AddPoly(self, poly, smart_duplicate_handling=True):
inserted_name = poly.GetName()
if poly.GetName() in self._name_to_shape:
if not smart_duplicate_handling:
raise ShapeError("Duplicate shape found: " + poly.GetName())
print ("Warning: duplicate shape id being added to collection: " +
poly.GetName())
if poly.GreedyPolyMatchDist(self._name_to_shape[poly.GetName()]) < 10:
print(" (Skipping as it apears to be an exact duplicate)")
else:
print(" (Adding new shape variant with uniquified name)")
inserted_name = "%s-%d" % (inserted_name, len(self._name_to_shape))
self._name_to_shape[inserted_name] = poly | Adds a new polyline to the collection. |
9,364 | def get_genes_for_hgnc_id(self, hgnc_symbol):
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/xrefs/symbol/homo_sapiens/{}".format(hgnc_symbol)
r = self.ensembl_request(ext, headers)
genes = []
for item in json.loads(r):
if item["type"] == "gene":
genes.append(item["id"])
return genes | obtain the ensembl gene IDs that correspond to a HGNC symbol |
9,365 | def main_nonexecutable_region_limbos_contain(self, addr, tolerance_before=64, tolerance_after=64):
closest_region = None
least_limbo = None
for start, end in self.main_nonexecutable_regions:
if start - tolerance_before <= addr < start:
if least_limbo is None or start - addr < least_limbo:
closest_region = (True, start)
least_limbo = start - addr
if end <= addr < end + tolerance_after:
if least_limbo is None or addr - end < least_limbo:
closest_region = (True, end)
least_limbo = addr - end
if closest_region is not None:
return closest_region
return False, None | Sometimes there exists a pointer that points to a few bytes before the beginning of a section, or a few bytes
after the end of the section. We take care of that here.
:param int addr: The address to check.
:return: A 2-tuple of (bool, the closest base address)
:rtype: tuple |
9,366 | def create_node(hostname, username, password, name, address):
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'create', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
'address': address
}
)
existing = __salt__['bigip.list_node'](hostname, username, password, name)
if existing['code'] == 200:
ret['result'] = True
ret['comment'] = 'A node by this name currently exists. No change made.'
else:
ret = _load_result(existing, ret)
return ret | Create a new node if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to create
address
The address of the node |
9,367 | def ReadSerializableArray(self, class_name, max=sys.maxsize):
module = '.'.join(class_name.split('.')[:-1])
klassname = class_name.split('.')[-1]
klass = getattr(importlib.import_module(module), klassname)
length = self.ReadVarInt(max=max)
items = []
try:
for i in range(0, length):
item = klass()
item.Deserialize(self)
items.append(item)
except Exception as e:
logger.error("Couldn't deserialize %s " % e)
return items | Deserialize a stream into the object specific by `class_name`.
Args:
class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'
max (int): (Optional) maximum number of bytes to read.
Returns:
list: list of `class_name` objects deserialized from the stream. |
9,368 | def get_vnetwork_portgroups_output_vnetwork_pgs_vlan(self, **kwargs):
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
vlan = ET.SubElement(vnetwork_pgs, "vlan")
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
9,369 | def _convert_pflags(self, pflags):
if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
flags = os.O_RDWR
elif pflags & SFTP_FLAG_WRITE:
flags = os.O_WRONLY
else:
flags = os.O_RDONLY
if pflags & SFTP_FLAG_APPEND:
flags |= os.O_APPEND
if pflags & SFTP_FLAG_CREATE:
flags |= os.O_CREAT
if pflags & SFTP_FLAG_TRUNC:
flags |= os.O_TRUNC
if pflags & SFTP_FLAG_EXCL:
flags |= os.O_EXCL
return flags | convert SFTP-style open() flags to Python's os.open() flags |
9,370 | def get_requires(self, profile=None):
out = []
for req in self.requires:
if ((req.profile and not profile) or
(req.profile and profile and req.profile != profile)):
continue
out.append(req)
return out | Get filtered list of Require objects in this Feature
:param str profile: Return Require objects with this profile or None
to return all Require objects.
:return: list of Require objects |
9,371 | def get_relavent_units(self):
relavent_units = {}
for location,unit in self.units.items():
if self.unit_is_related(location, self.worksheet):
relavent_units[location] = unit
return relavent_units | Retrieves the relevant units for this data block.
Returns:
All units related to this block. |
9,372 | def get_annotation_data_before_time(self, id_tier, time):
if self.tiers[id_tier][1]:
return self.get_ref_annotation_before_time(id_tier, time)
befores = self.get_annotation_data_between_times(id_tier, 0, time)
if befores:
return [max(befores, key=lambda x: x[0])]
else:
return [] | Give the annotation before a given time. When the tier contains
reference annotations this will be returned, check
:func:`get_ref_annotation_data_before_time` for the format. If an
annotation overlaps with ``time`` that annotation will be returned.
:param str id_tier: Name of the tier.
:param int time: Time to get the annotation before.
:raises KeyError: If the tier is non existent. |
9,373 | def apply_index(self, i):
if type(self) is not DateOffset:
raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(
name=self.__class__.__name__))
kwds = self.kwds
relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
'minutes', 'seconds', 'microseconds'}
if (self._use_relativedelta and
set(kwds).issubset(relativedelta_fast)):
months = ((kwds.get('years', 0) * 12 +
kwds.get('months', 0)) * self.n)
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = type(i)(shifted, freq=i.freq, dtype=i.dtype)
weeks = (kwds.get('weeks', 0)) * self.n
if weeks:
asper = i.to_period()
if not isinstance(asper._data, np.ndarray):
asper = asper._data
shifted = asper._time_shift(weeks)
i = shifted.to_timestamp() + i.to_perioddelta()
timedelta_kwds = {k: v for k, v in kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds']}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
return i + (self._offset * self.n)
else:
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd)) | Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex |
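In the pandas versions this code appears to come from, adding a relativedelta-style DateOffset to a whole DatetimeIndex goes through this vectorized path rather than element-by-element arithmetic; a small usage sketch:

```python
import pandas as pd

# Index-level arithmetic with a calendar offset (months handled via a fast shift).
idx = pd.date_range('2019-01-15', periods=3, freq='D')
print(idx + pd.DateOffset(months=2))
# -> 2019-03-15, 2019-03-16, 2019-03-17
```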
9,374 | def _draw_box(self, parent_node, quartiles, outliers, box_index, metadata):
width = (self.view.x(1) - self.view.x(0)) / self._order
series_margin = width * self._series_margin
left_edge = self.view.x(0) + width * box_index + series_margin
width -= 2 * series_margin
for i, whisker in enumerate((quartiles[0], quartiles[2],
quartiles[4])):
whisker_width = width if i == 1 else width / 2
shift = (width - whisker_width) / 2
xs = left_edge + shift
xe = left_edge + width - shift
alter(
self.svg.line(
parent_node,
coords=[(xs, self.view.y(whisker)),
(xe, self.view.y(whisker))],
class_=,
attrib={: 3}
), metadata
)
alter(
self.svg.line(
parent_node,
coords=[(left_edge + width / 2, self.view.y(quartiles[0])),
(left_edge + width / 2, self.view.y(quartiles[1]))],
class_=,
attrib={: 2}
), metadata
)
alter(
self.svg.line(
parent_node,
coords=[(left_edge + width / 2, self.view.y(quartiles[4])),
(left_edge + width / 2, self.view.y(quartiles[3]))],
class_=,
attrib={: 2}
), metadata
)
alter(
self.svg.node(
parent_node,
tag=,
x=left_edge,
y=self.view.y(quartiles[1]),
height=self.view.y(quartiles[3]) - self.view.y(quartiles[1]),
width=width,
class_=
), metadata
)
for o in outliers:
alter(
self.svg.node(
parent_node,
tag=,
cx=left_edge + width / 2,
cy=self.view.y(o),
r=3,
class_=
), metadata
)
return (
left_edge + width / 2,
self.view.y(sum(quartiles) / len(quartiles))
) | Return the center of a bounding box defined by a box plot.
Draws a box plot on self.svg. |
9,375 | def pkgPath(root, path, rpath="/"):
global data_files
if not os.path.exists(path):
return
files = []
for spath in os.listdir(path):
if spath == :
continue
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isfile(subpath):
files.append(subpath)
if os.path.isdir(subpath):
pkgPath(root, subpath, spath)
data_files.append((root + rpath, files)) | Package up a path recursively |
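For context, the pairs collected in data_files are the (install_dir, [files]) tuples that setuptools' data_files expects. A close, non-recursive equivalent using os.walk (directory names here are invented, and empty directories are skipped unlike the original):

```python
import os

# Build (install_dir, [files]) pairs for setuptools' data_files.
def collect_data_files(root, path):
    pairs = []
    for dirpath, _dirnames, filenames in os.walk(path):
        rel = os.path.relpath(dirpath, path)
        target = root if rel == '.' else os.path.join(root, rel)
        files = [os.path.join(dirpath, f) for f in filenames]
        if files:
            pairs.append((target, files))
    return pairs

# e.g. setup(..., data_files=collect_data_files('share/mypkg', 'share'))
print(collect_data_files('share/mypkg', '.')[:1])  # first pair from the current directory
```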
9,376 | def fit_labels_to_mask(label_image, mask):
label_image = scipy.asarray(label_image)
mask = scipy.asarray(mask, dtype=scipy.bool_)
if label_image.shape != mask.shape:
raise ValueError('label_image and mask must be of the same shape')
labels = scipy.unique(label_image)
collection = {}
for label in labels:
collection[label] = [0, 0, []]
for x in range(label_image.shape[0]):
for y in range(label_image.shape[1]):
for z in range(label_image.shape[2]):
entry = collection[label_image[x,y,z]]
entry[0] += 1
if mask[x,y,z]: entry[1] += 1
entry[2].append((x,y,z))
for label in labels:
if collection[label][0] / 2. >= collection[label][1]:
del collection[label]
image_result = mask.copy()
image_result.fill(False)
for label, data in list(collection.items()):
for point in data[2]:
image_result[point] = True
return image_result | r"""
Reduces a label images by overlaying it with a binary mask and assign the labels
either to the mask or to the background. The resulting binary mask is the nearest
expression the label image can form of the supplied binary mask.
Parameters
----------
label_image : array_like
A nD label map.
mask : array_like
A mask image, i.e., a binary image with False for background and True for foreground.
Returns
-------
best_fit : ndarray
The best fit of the labels to the mask.
Raises
------
ValueError
If ``label_image`` and ``mask`` are not of the same shape. |
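A small demo of the same majority-overlap rule on toy 1D arrays (a label is kept only if more than half of its voxels fall inside the mask); numpy's bincount stands in for the explicit triple loop, and the arrays are made up:

```python
import numpy as np

# Toy 1D example of the majority-overlap rule the function implements.
label_image = np.array([1, 1, 1, 2, 2, 2, 3, 3])
mask        = np.array([1, 1, 0, 0, 0, 1, 1, 1], dtype=bool)

totals = np.bincount(label_image)                # voxels per label
inside = np.bincount(label_image, weights=mask)  # voxels per label inside the mask
keep = np.flatnonzero(inside > totals / 2.0)     # labels assigned to the foreground
best_fit = np.isin(label_image, keep)
print(best_fit)  # [ True  True  True False False False  True  True]
```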
9,377 | def root_item_selected(self, item):
if self.show_all_files:
return
for root_item in self.get_top_level_items():
if root_item is item:
self.expandItem(root_item)
else:
self.collapseItem(root_item) | Root item has been selected: expanding it and collapsing others |
9,378 | def _filter_modules(self, plugins, names):
if self.module_plugin_filters:
original_length_plugins = len(plugins)
module_plugins = set()
for module_filter in self.module_plugin_filters:
module_plugins.update(module_filter(plugins, names))
if len(plugins) < original_length_plugins:
warning = 'Module plugin filter {} removed plugins from the plugin set'
self._log.info(warning.format(module_filter))
plugins = module_plugins
return plugins | Internal helper method to parse all of the plugins and names
through each of the module filters |
9,379 | def _wait_and_except_if_failed(self, event, timeout=None):
event.wait(timeout or self.__sync_timeout)
self._except_if_failed(event) | Combines waiting for event and call to `_except_if_failed`. If timeout is not specified the configured
sync_timeout is used. |
9,380 | def get_version():
if 'win' not in sys.platform:
return NO_WIN
win_ver = sys.getwindowsversion()
try:
major, minor, build = win_ver.platform_version
except AttributeError:
if sys.version_info < (3, 0):
from platform import _get_real_winver
major, minor, build = _get_real_winver(win_ver.major, win_ver.minor, win_ver.build)
major, minor, build = int(major), int(minor), int(build)
else:
major, minor, build = win_ver.major, win_ver.minor, win_ver.build
try:
# service_pack is a string such as 'Service Pack 1'; missing or malformed values fall back to 0
sp_ver = int(win_ver.service_pack.rsplit(' ', 1)[-1])
except (IndexError, ValueError):
sp_ver = 0
is_server = 1 if win_ver.product_type == 3 else 0
return (major, minor, sp_ver, is_server)
Params:
None
Returns:
The Windows OS version running on the machine (comparable with the values listed in the class). |
9,381 | def redraw(self, whence=0):
with self._defer_lock:
whence = min(self._defer_whence, whence)
if not self.defer_redraw:
if self._hold_redraw_cnt == 0:
self._defer_whence = self._defer_whence_reset
self.redraw_now(whence=whence)
else:
self._defer_whence = whence
return
elapsed = time.time() - self.time_last_redraw
if (not self._defer_flag) or (elapsed > self.defer_lagtime):
if elapsed > self.defer_lagtime:
if self._hold_redraw_cnt > 0:
self._defer_whence = whence
return
self._defer_whence = self._defer_whence_reset
self.logger.debug("lagtime expired--forced redraw")
self.redraw_now(whence=whence)
return
self._defer_flag = True
self._defer_whence = whence
secs = self.defer_lagtime - elapsed
self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (
whence, secs))
self.reschedule_redraw(secs)
else:
self._defer_whence = whence
self.logger.debug("update whence=%.2f" % (whence)) | Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`. |
9,382 | def set_mode_px4(self, mode, custom_mode, custom_sub_mode):
if isinstance(mode, str):
mode_map = self.mode_mapping()
if mode_map is None or mode not in mode_map:
print("Unknown mode " % mode)
return
mode, custom_mode, custom_sub_mode = px4_map[mode]
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0, mode, custom_mode, custom_sub_mode, 0, 0, 0, 0) | enter arbitrary mode |
9,383 | def updateStatus(self, dataset, is_dataset_valid):
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/updateStatus. dataset is required.")
conn = self.dbi.connection()
trans = conn.begin()
try:
self.updatestatus.execute(conn, dataset, is_dataset_valid, trans)
trans.commit()
trans = None
except Exception as ex:
if trans:
trans.rollback()
raise ex
finally:
if trans:
trans.rollback()
if conn:
conn.close() | Used to toggle the status of a dataset is_dataset_valid=0/1 (invalid/valid) |
9,384 | def Kdiag(self, X):
vyt = self.variance_Yt
vyx = self.variance_Yx
lyt = 1./(2*self.lengthscale_Yt)
lyx = 1./(2*self.lengthscale_Yx)
a = self.a
b = self.b
c = self.c
k3 = ( 4*3*lyx**2 )*vyt*vyx
Kdiag = np.zeros(X.shape[0])
slices = index_to_slices(X[:,-1])
for i, ss1 in enumerate(slices):
for s1 in ss1:
if i==0:
Kdiag[s1]+= vyt*vyx
elif i==1:
Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
else:
raise ValueError("invalid input/output index")
return Kdiag | Compute the diagonal of the covariance matrix associated to X. |
9,385 | def collect(context=None, style=None, palette=None, **kwargs):
params = {}
if context:
params.update(get(context, 'context', **kwargs))
if style:
params.update(get(style, 'style', **kwargs))
if palette:
params.update(get(palette, 'palette', **kwargs))
return params | Returns the merged rcParams dict of the specified context, style, and palette.
Parameters
----------
context: str
style: str
palette: str
kwargs:
-
Returns
-------
rcParams: dict
The merged parameter dicts of the specified context, style, and palette.
Notes
-----
The rcParams dicts are loaded and updated in the order: context, style, palette. That means if
a context parameter is also defined in the style or palette dict, it will be overwritten. There
is currently no checking being done to avoid this. |
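Since there is no conflict checking, later dicts silently win. A tiny sketch of the merge order with made-up rcParams keys:

```python
# Order matters: context, then style, then palette (later values overwrite earlier ones).
context_params = {'font.size': 12, 'lines.linewidth': 1.5}
style_params   = {'lines.linewidth': 2.0, 'axes.grid': True}
palette_params = {'axes.prop_cycle': ['#1f77b4', '#ff7f0e']}

params = {}
for layer in (context_params, style_params, palette_params):
    params.update(layer)
print(params['lines.linewidth'])  # 2.0 -- the style value overwrote the context value
```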
9,386 | def open_file(self, file_):
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text | Receives a file path as input and returns a
string with the contents of the file |
9,387 | def happens(intervals: Iterable[float], name: Optional[str] = None) -> Callable:
def hook(event: Callable):
def make_happen(*args_event: Any, **kwargs_event: Any) -> None:
if name is not None:
local.name = cast(str, name)
for interval in intervals:
advance(interval)
add(event, *args_event, **kwargs_event)
return make_happen
return hook | Decorator used to set up a process that adds a new instance of another process at intervals dictated by the given
sequence (which may be infinite).
Example: the following program runs process named `my_process` 5 times, each time spaced by 2.0 time units.
```
from itertools import repeat
sim = Simulator()
log = []
@happens(repeat(2.0, 5))
def my_process(the_log):
the_log.append(now())
sim.add(my_process, log)
sim.run()
print(str(log)) # Expect: [2.0, 4.0, 6.0, 8.0, 10.0]
``` |
9,388 | def last_archive(self):
archives = {}
for archive in self.archives():
archives[int(archive.split()[0].split()[-1])] = archive
return archives and archives[max(archives)] or None | Get the last available archive
:return: |
9,389 | def _make_image_to_vec_tito(feature_name, tmp_dir=None, checkpoint=None):
def _image_to_vec(image_str_tensor):
def _decode_and_resize(image_tensor):
f_out.write(f_in.read())
with tf.Session() as sess:
saver.restore(sess, checkpoint_tmp)
output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
g.as_graph_def(),
[so.op.name])
file_io.delete_file(checkpoint_tmp)
if tmp_dir is None:
shutil.rmtree(checkpoint_dir)
tensors_out = tf.import_graph_def(output_graph_def,
input_map={si.name: tensor_in},
return_elements=[so.name])
return tensors_out[0]
return _tito_out
if not checkpoint:
checkpoint = INCEPTION_V3_CHECKPOINT
return _tito_from_checkpoint(_image_to_vec, checkpoint, INCEPTION_EXCLUDED_VARIABLES) | Creates a tensor-in-tensor-out function that produces embeddings from image bytes.
Image to embedding is implemented with Tensorflow's inception v3 model and a pretrained
checkpoint. It returns 1x2048 'PreLogits' embeddings for each image.
Args:
feature_name: The name of the feature. Used only to identify the image tensors so
we can get gradients for probe in image prediction explaining.
tmp_dir: a local directory that is used for downloading the checkpoint. If
non, a temp folder will be made and deleted.
checkpoint: the inception v3 checkpoint gs or local path. If None, default checkpoint
is used.
Returns: a tensor-in-tensor-out function that takes image string tensor and returns embeddings. |
9,390 | def get_hints(self, plugin):
hints = []
for hint_name in getattr(plugin, 'hints', []):
hint_plugin = self._plugins.get(hint_name)
if hint_plugin:
hint_result = Result(
name=hint_plugin.name,
homepage=hint_plugin.homepage,
from_url=self.requested_url,
type=HINT_TYPE,
plugin=plugin.name,
)
hints.append(hint_result)
logger.debug(f'Added hint {hint_plugin.name} for plugin {plugin.name}')
else:
logger.error(f'Hint {hint_name} requested by plugin {plugin.name} not found')
return hints | Return plugin hints from ``plugin``. |
9,391 | def _get_instance(self):
try:
instance = self.compute_driver.ex_get_node(
self.running_instance_id,
zone=self.region
)
except ResourceNotFoundError as e:
raise GCECloudException(
'Instance with id: {id} cannot be found: {error}'.format(
id=self.running_instance_id, error=e
)
)
return instance | Retrieve instance matching instance_id. |
9,392 | def intersection(self, other, ignore_conflicts=False):
result = self.copy()
result.intersection_update(other, ignore_conflicts)
return result | Return a new definition from the intersection of the definitions. |
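The copy-then-mutate pattern is the usual way to derive the non-mutating set operation from its in-place variant; a minimal sketch with a toy class standing in for the definition object (names and the unused ignore_conflicts flag are assumptions kept only for API parity):

```python
# Non-mutating intersection implemented on top of the in-place variant.
class Definitions:
    def __init__(self, items):
        self._items = set(items)

    def copy(self):
        return Definitions(self._items)

    def intersection_update(self, other, ignore_conflicts=False):
        # ignore_conflicts is accepted for parity with the API above; unused in this toy version.
        self._items &= other._items

    def intersection(self, other, ignore_conflicts=False):
        result = self.copy()
        result.intersection_update(other, ignore_conflicts)
        return result

a, b = Definitions({'x', 'y', 'z'}), Definitions({'y', 'z', 'w'})
print(sorted(a.intersection(b)._items))  # ['y', 'z']
```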
9,393 | def _assign_method(self, resource_class, method_type):
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
)
@self._cache
def get(self, uid=None, method_type=method_type,
method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, **kwargs):
uid = uid.lower() if isinstance(uid, str) else uid
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
setattr(
self, method_name,
types.MethodType(get, self)
)
self._all_get_methods_names.append(method_name) | Exactly the same code as the original except:
- uid is now first parameter (after self). Therefore, no need to explicitly call 'uid='
- Ignored the other http methods besides GET (as they are not needed for the pokeapi.co API)
- Added cache wrapping function
- Added a way to list all get methods |
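The core trick is binding a freshly built closure onto the instance with types.MethodType. A stripped-down sketch (the resource names and the fake call_api below are invented, not part of the original client):

```python
import types

class Client:
    def __init__(self, resource_names):
        self._all_get_methods_names = []
        for name in resource_names:
            self._assign_get(name)

    def call_api(self, method_name, uid=None):
        return f'GET /{method_name}/{uid}'  # placeholder for the real HTTP call

    def _assign_get(self, resource_name):
        method_name = f'get_{resource_name}'

        def get(self, uid=None, method_name=method_name):
            uid = uid.lower() if isinstance(uid, str) else uid
            return self.call_api(method_name, uid=uid)

        # Bind the closure to this instance so it behaves like a normal method.
        setattr(self, method_name, types.MethodType(get, self))
        self._all_get_methods_names.append(method_name)

client = Client(['pokemon', 'berry'])
print(client.get_pokemon('Pikachu'))      # GET /get_pokemon/pikachu
print(client._all_get_methods_names)      # ['get_pokemon', 'get_berry']
```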
9,394 | def clean_metric_name(self, metric_name):
if not self._clean_metric_name:
return metric_name
metric_name = str(metric_name)
for _from, _to in self.cleaning_replacement_list:
metric_name = metric_name.replace(_from, _to)
return metric_name | Make sure the metric is free of control chars, spaces, tabs, etc. |
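A self-contained sketch of the cleaning step; the replacement list here is an assumption for illustration (the real list lives on the reporter instance):

```python
# Hypothetical cleaning list: characters graphite-style backends commonly reject.
CLEANING_REPLACEMENT_LIST = [(' ', '_'), ('/', '-'), ('\t', ''), ('..', '.')]

def clean_metric_name(metric_name, enabled=True):
    if not enabled:
        return metric_name
    metric_name = str(metric_name)
    for _from, _to in CLEANING_REPLACEMENT_LIST:
        metric_name = metric_name.replace(_from, _to)
    return metric_name

print(clean_metric_name('web server/requests per sec'))  # web_server-requests_per_sec
```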
9,395 | def pm(client, event, channel, nick, rest):
if rest:
rest = rest.strip()
Karma.store.change(rest, 2)
rcpt = rest
else:
rcpt = channel
if random.random() > 0.95:
return f"Arrggh ye be doin good work, {rcpt}!" | Arggh matey |
9,396 | def from_description(cls, description, attrs):
hash_key = None
range_key = None
index_type = description["Projection"]["ProjectionType"]
includes = description["Projection"].get("NonKeyAttributes")
for data in description["KeySchema"]:
name = data["AttributeName"]
if name not in attrs:
continue
key_type = data["KeyType"]
if key_type == "HASH":
hash_key = TableField(name, attrs[name].data_type, key_type)
elif key_type == "RANGE":
range_key = TableField(name, attrs[name].data_type, key_type)
throughput = description["ProvisionedThroughput"]
return cls(
description["IndexName"],
index_type,
description["IndexStatus"],
hash_key,
range_key,
throughput["ReadCapacityUnits"],
throughput["WriteCapacityUnits"],
description.get("IndexSizeBytes", 0),
includes,
description,
) | Create an object from a dynamo3 response |
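A hedged sketch of feeding a DescribeTable-style index description through the same key-schema parsing; the description dict is hand-written example data, not real API output, and plain tuples stand in for TableField:

```python
# Minimal parse of a DynamoDB-style secondary index description.
description = {
    "IndexName": "ts-index",
    "IndexStatus": "ACTIVE",
    "Projection": {"ProjectionType": "INCLUDE", "NonKeyAttributes": ["payload"]},
    "KeySchema": [
        {"AttributeName": "id", "KeyType": "HASH"},
        {"AttributeName": "ts", "KeyType": "RANGE"},
    ],
    "ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 1},
}
attrs = {"id": "S", "ts": "N"}  # attribute name -> data type, as a plain dict here

hash_key = range_key = None
for data in description["KeySchema"]:
    name, key_type = data["AttributeName"], data["KeyType"]
    if name not in attrs:
        continue
    field = (name, attrs[name], key_type)
    if key_type == "HASH":
        hash_key = field
    elif key_type == "RANGE":
        range_key = field
print(hash_key, range_key)  # ('id', 'S', 'HASH') ('ts', 'N', 'RANGE')
```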
9,397 | def facts(self):
fact = lib.EnvGetNextFact(self._env, ffi.NULL)
while fact != ffi.NULL:
yield new_fact(self._env, fact)
fact = lib.EnvGetNextFact(self._env, fact) | Iterate over the asserted Facts. |
9,398 | def create_base_logger(config=None, parallel=None):
if parallel is None: parallel = {}
parallel_type = parallel.get("type", "local")
cores = parallel.get("cores", 1)
if parallel_type == "ipython":
from bcbio.log import logbook_zmqpush
fqdn_ip = socket.gethostbyname(socket.getfqdn())
ips = [fqdn_ip] if (fqdn_ip and not fqdn_ip.startswith("127.")) else []
if not ips:
ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
if not ip.startswith("127.")]
if not ips:
ips += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())[1] for s in
[socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]]
if not ips:
sys.stderr.write("Cannot resolve a local IP address that isn't 127.x.x.x "
"Your machines might not have a local IP address "
"assigned or are not able to resolve it.\n")
sys.exit(1)
uri = "tcp://%s" % ips[0]
subscriber = logbook_zmqpush.ZeroMQPullSubscriber()
mport = subscriber.socket.bind_to_random_port(uri)
wport_uri = "%s:%s" % (uri, mport)
parallel["log_queue"] = wport_uri
subscriber.dispatch_in_background(_create_log_handler(config, True))
elif cores > 1:
# Shared queue for worker processes (assumes a module-level multiprocessing import).
mpq = multiprocessing.Queue(-1)
parallel["log_queue"] = mpq
subscriber = IOSafeMultiProcessingSubscriber(mpq)
subscriber.dispatch_in_background(_create_log_handler(config))
else:
pass
return parallel | Setup base logging configuration, also handling remote logging.
Correctly sets up for local, multiprocessing and distributed runs.
Creates subscribers for non-local runs that will be references from
local logging.
Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589 |
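The IP-resolution fallback chain ends with the UDP-connect trick from the linked Stack Overflow answer; a standalone sketch (connecting a UDP socket sends no packets, but it does require a route to the placeholder address):

```python
import socket

def local_ip():
    # Connecting a UDP socket to a routable address reveals the outbound interface's IP;
    # 8.8.8.8:53 is just a conventional placeholder, no traffic is actually sent.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))
        return s.getsockname()[0]
    finally:
        s.close()

print(local_ip())  # e.g. 192.168.1.23
```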
9,399 | def pre_serialize(self, raw, pkt, i):
self.length = len(raw) + OpenflowHeader._MINLEN | Set length of the header based on |