Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k)
---|---|---|
8,400 | def _service_is_sysv(name):
script = '/etc/init.d/{0}'.format(name)
return not _service_is_upstart(name) and os.access(script, os.X_OK) | A System-V style service will have a control script in
/etc/init.d. We make sure to skip over symbolic links that point
to Upstart's /lib/init/upstart-job, and anything that isn't an
executable, like README or skeleton. |
8,401 | def current(self):
results = self._timeline.find_withtag(tk.CURRENT)
return results[0] if len(results) != 0 else None | Currently active item on the _timeline Canvas
:rtype: str |
8,402 | def kde_peak(self, name, npoints=_npoints, **kwargs):
data = self.get(name,**kwargs)
return kde_peak(data,npoints) | Calculate peak of kernel density estimator |
8,403 | def pop_empty_columns(self, empty=None):
empty = ['', None] if empty is None else empty
if len(self) == 0:
return
for col in list(self.columns):
if self[0][col] in empty:
if not [v for v in self.get_column(col) if v not in empty]:
self.pop_column(col) | This will pop columns from the printed columns if they only contain
'' or None
:param empty: list of values to treat as empty |
8,404 | def get_join_cols(by_entry):
left_cols = []
right_cols = []
for col in by_entry:
if isinstance(col, str):
left_cols.append(col)
right_cols.append(col)
else:
left_cols.append(col[0])
right_cols.append(col[1])
return left_cols, right_cols | Helper function used for joins;
builds the left and right join column lists for the join function. |
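A quick doctest-style illustration of the behaviour described above: a plain string joins on the same column name on both sides, while a pair maps the left name to the right name (the column names are made up).
>>> get_join_cols(['a', ('b', 'c')])
(['a', 'b'], ['a', 'c'])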
8,405 | def recarray(self):
return numpy.rec.fromrecords(self.records, names=self.names) | Returns data as :class:`numpy.recarray`. |
8,406 | def order_replicant_volume(self, volume_id, snapshot_schedule,
location, tier=None):
file_mask = \
\
\
\
file_volume = self.get_file_volume_details(volume_id,
mask=file_mask)
order = storage_utils.prepare_replicant_order_object(
self, snapshot_schedule, location, tier, file_volume,
)
return self.client.call(, , order) | Places an order for a replicant file volume.
:param volume_id: The ID of the primary volume to be replicated
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt |
8,407 | def _lookup_user_data(self,*args,**kwargs):
user_data = self.get_user_data()
data_kind = kwargs.get('data_kind', 'customization')
try:
del(kwargs['data_kind'])
except KeyError:
pass
default_value = kwargs['default']
result = get_dict(user_data,data_kind,*args,**kwargs)
try:
result = int(result)
except:
pass
if not isinstance(result,default_value.__class__):
return default_value
else:
return result | Generic function for looking up values in
a user-specific dictionary. Use as follows::
_lookup_user_data('path','to','desired','value','in','dictionary',
default = <default value>,
data_kind = 'customization'/'saved_searches') |
8,408 | def mv_to_pypsa(network):
generators = network.mv_grid.generators
loads = network.mv_grid.graph.nodes_by_attribute()
branch_tees = network.mv_grid.graph.nodes_by_attribute()
lines = network.mv_grid.graph.lines()
lv_stations = network.mv_grid.graph.nodes_by_attribute()
mv_stations = network.mv_grid.graph.nodes_by_attribute()
disconnecting_points = network.mv_grid.graph.nodes_by_attribute(
)
storages = network.mv_grid.graph.nodes_by_attribute(
)
omega = 2 * pi * 50
generator = {: [],
: [],
: [],
: [],
: []}
bus = {: [], : [], : [], : []}
load = {: [], : []}
line = {: [],
: [],
: [],
: [],
: [],
: [],
: [],
: []}
transformer = {: [],
: [],
: [],
: [],
: [],
: [],
: [],
: [],
: []}
storage = {
: [],
: [],
: [],
: [],
: [],
: [],
: []}
for gen in generators:
bus_name = .join([, repr(gen)])
generator[].append(repr(gen))
generator[].append(bus_name)
generator[].append()
generator[].append(gen.nominal_capacity / 1e3)
generator[].append(.join([gen.type, gen.subtype]))
bus[].append(bus_name)
bus[].append(gen.grid.voltage_nom)
bus[].append(gen.geom.x)
bus[].append(gen.geom.y)
for bt in branch_tees:
bus[].append(.join([, repr(bt)]))
bus[].append(bt.grid.voltage_nom)
bus[].append(bt.geom.x)
bus[].append(bt.geom.y)
for lo in loads:
bus_name = .join([, repr(lo)])
load[].append(repr(lo))
load[].append(bus_name)
bus[].append(bus_name)
bus[].append(lo.grid.voltage_nom)
bus[].append(lo.geom.x)
bus[].append(lo.geom.y)
for l in lines:
line[].append(repr(l[]))
if l[][0] in lv_stations:
line[].append(
.join([, l[][0].__repr__(side=)]))
else:
line[].append(.join([, repr(l[][0])]))
if l[][1] in lv_stations:
line[].append(
.join([, l[][1].__repr__(side=)]))
else:
line[].append(.join([, repr(l[][1])]))
line[].append("")
line[].append(
l[].type[] / l[].quantity * omega / 1e3 *
l[].length)
line[].append(l[].type[] / l[].quantity *
l[].length)
line[].append(
sqrt(3) * l[].type[] * l[].type[] *
l[].quantity / 1e3)
line[].append(l[].length)
for lv_st in lv_stations:
transformer_count = 1
bus0_name = .join([, lv_st.__repr__(side=)])
bus[].append(bus0_name)
bus[].append(lv_st.mv_grid.voltage_nom)
bus[].append(lv_st.geom.x)
bus[].append(lv_st.geom.y)
bus1_name = .join([, lv_st.__repr__(side=)])
bus[].append(bus1_name)
bus[].append(lv_st.transformers[0].voltage_op)
bus[].append(None)
bus[].append(None)
return components | Translate MV grid topology representation to PyPSA format
MV grid topology translated here includes
* MV station (no transformer, see :meth:`~.grid.network.EDisGo.analyze`)
* Loads, Generators, Lines, Storages, Branch Tees of MV grid level as well
as LV stations. LV stations do not have load and generation of LV level.
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'Transformer'
* 'StorageUnit'
.. warning::
PyPSA takes resistance R and reactance X in p.u. The conversion from
values in ohm to pu notation is performed by following equations
.. math::
r_{p.u.} = R_{\Omega} / Z_{B}
x_{p.u.} = X_{\Omega} / Z_{B}
with :math:`Z_{B} = V_{B} / S_{B}`.
It is quite likely, but not 100 % clear, that the base voltage V_B is
chosen correctly. We take the primary-side voltage of the transformer as
the transformer's base voltage. See
`#54 <https://github.com/openego/eDisGo/issues/54>`_ for discussion. |
8,409 | async def answer(self, text: typing.Union[base.String, None] = None,
show_alert: typing.Union[base.Boolean, None] = None,
url: typing.Union[base.String, None] = None,
cache_time: typing.Union[base.Integer, None] = None):
await self.bot.answer_callback_query(callback_query_id=self.id, text=text,
show_alert=show_alert, url=url, cache_time=cache_time) | Use this method to send answers to callback queries sent from inline keyboards.
The answer will be displayed to the user as a notification at the top of the chat screen or as an alert.
Alternatively, the user can be redirected to the specified Game URL.
For this option to work, you must first create a game for your bot via @Botfather and accept the terms.
Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter.
Source: https://core.telegram.org/bots/api#answercallbackquery
:param text: Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters
:type text: :obj:`typing.Union[base.String, None]`
:param show_alert: If true, an alert will be shown by the client instead of a notification
at the top of the chat screen. Defaults to false.
:type show_alert: :obj:`typing.Union[base.Boolean, None]`
:param url: URL that will be opened by the user's client.
:type url: :obj:`typing.Union[base.String, None]`
:param cache_time: The maximum amount of time in seconds that the
result of the callback query may be cached client-side.
:type cache_time: :obj:`typing.Union[base.Integer, None]`
:return: On success, True is returned.
:rtype: :obj:`base.Boolean` |
8,410 | def start(port, root_directory, bucket_depth):
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.current().start() | Starts the mock S3 server on the given port at the given path. |
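A hypothetical invocation of the mock server above (port and directory are assumptions); note the call blocks because it starts the Tornado IOLoop.
start(port=10001, root_directory='/tmp/mock-s3', bucket_depth=0)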
8,411 | def setup_sanitize_files(self):
for fname in self.get_sanitize_files():
with open(fname, 'r') as f:
self.sanitize_patterns.update(get_sanitize_patterns(f.read())) | For each of the sanitize files that were specified as command line options,
load the contents of the file into the sanitize patterns dictionary. |
8,412 | def lookup_genome_alignment_index(index_fh, indexed_fh, out_fh=sys.stdout,
key=None, verbose=False):
bound_iter = functools.partial(genome_alignment_iterator,
reference_species="hg19", index_friendly=True)
hash_func = JustInTimeGenomeAlignmentBlock.build_hash
idx = IndexedFile(record_iterator=bound_iter, record_hash_function=hash_func)
idx.read_index(index_fh, indexed_fh)
if key is None:
while key is None or key.strip() != "":
sys.stderr.write("[WAITING FOR KEY ENTRY ON STDIN; " +
"END WITH EMPTY LINE]\n")
key = raw_input()
key = .join(key.split()).strip()
if key != "":
out_fh.write(str(idx[key]) + "\n")
sys.stderr.write("\n")
else:
key = .join(key.split())
out_fh.write(str(idx[key]) + "\n") | Load a GA index and its indexed file and extract one or more blocks.
:param index_fh: the index file to load. Can be a filename or a
stream-like object.
:param indexed_fh: the file that the index was built for.
:param key: A single key, iterable of keys, or None. This key will be
used for lookup. If None, user is prompted to enter keys
interactively. |
8,413 | def _create_client(base_url: str, tls: TLSConfig=False) -> Optional[APIClient]:
try:
client = APIClient(base_url=base_url, tls=tls, version="auto")
return client if client.ping() else None
except:
return None | Creates a Docker client with the given details.
:param base_url: the base URL of the Docker daemon
:param tls: the Docker daemon's TLS config (if any)
:return: the created client else None if unable to connect the client to the daemon |
8,414 | def volume_percentage_used(self, volume):
volume = self._get_volume(volume)
if volume is not None:
total = int(volume["size"]["total"])
used = int(volume["size"]["used"])
if used is not None and used > 0 and \
total is not None and total > 0:
return round((float(used) / float(total)) * 100.0, 1) | Total used size in percentage for volume |
8,415 | def get_theme(self):
xblock_settings = self.get_xblock_settings(default={})
if xblock_settings and self.theme_key in xblock_settings:
return xblock_settings[self.theme_key]
return self.default_theme_config | Gets theme settings from settings service. Falls back to default (LMS) theme
if the settings service is not available, xblock theme settings are not set, or they do not
contain mentoring theme settings. |
8,416 | def add_to_queue(self, series):
result = self._android_api.add_to_queue(series_id=series.series_id)
return result | Add a series to the queue
@param crunchyroll.models.Series series
@return bool |
8,417 | def MoveToAttributeNo(self, no):
ret = libxml2mod.xmlTextReaderMoveToAttributeNo(self._o, no)
return ret | Moves the position of the current instance to the attribute
with the specified index relative to the containing element. |
8,418 | def get_family_hierarchy_session(self, proxy):
if not self.supports_family_hierarchy():
raise errors.Unimplemented()
return sessions.FamilyHierarchySession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the family hierarchy service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyHierarchySession) - a
``FamilyHierarchySession`` for families
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_hierarchy()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_family_hierarchy()`` is ``true``.* |
8,419 | def _write(self, _new=False):
pipeline = self.db.pipeline()
self._create_membership(pipeline)
self._update_indices(pipeline)
h = {}
for k, v in self.attributes.iteritems():
if isinstance(v, DateTimeField):
if v.auto_now:
setattr(self, k, datetime.now())
if v.auto_now_add and _new:
setattr(self, k, datetime.now())
elif isinstance(v, DateField):
if v.auto_now:
setattr(self, k, date.today())
if v.auto_now_add and _new:
setattr(self, k, date.today())
for_storage = getattr(self, k)
if for_storage is not None:
h[k] = v.typecast_for_storage(for_storage)
for index in self.indices:
if index not in self.lists and index not in self.attributes:
v = getattr(self, index)
if callable(v):
v = v()
if v:
try:
h[index] = unicode(v)
except UnicodeError:
h[index] = unicode(v.decode())
pipeline.delete(self.key())
if h:
pipeline.hmset(self.key(), h)
for k, v in self.lists.iteritems():
l = List(self.key()[k], pipeline=pipeline)
l.clear()
values = getattr(self, k)
if values:
if v._redisco_model:
l.extend([item.id for item in values])
else:
l.extend(values)
pipeline.execute() | Writes the values of the attributes to the datastore.
This method also creates the indices and saves the lists
associated to the object. |
8,420 | def handle_delete(self):
from intranet.apps.eighth.models import EighthScheduledActivity
EighthScheduledActivity.objects.filter(eighthsignup_set__user=self).update(
archived_member_count=F('archived_member_count') + 1) | Handle a graduated user being deleted. |
8,421 | def find_stream(self, **kwargs):
found = list(self.find_streams(**kwargs).values())
if not found:
raise StreamNotFoundError(kwargs)
if len(found) > 1:
raise MultipleStreamsFoundError(kwargs)
return found[0] | Finds a single stream with the given meta data values. Useful for debugging purposes.
:param kwargs: The meta data as keyword arguments
:return: The stream found |
8,422 | def box_plot(x, y, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
if (not isinstance(x, tc.data_structures.sarray.SArray) or
not isinstance(y, tc.data_structures.sarray.SArray) or
x.dtype != str or y.dtype not in [int, float]):
raise ValueError("turicreate.visualization.box_plot supports " +
"x as SArray of dtype str and y as SArray of dtype: int, float." +
"\nExample: turicreate.visualization.box_plot(tc.SArray([,,,,]),tc.SArray([4.0,3.25,2.1,2.0,1.0]))")
title = _get_title(title)
plt_ref = tc.extensions.plot_boxes_and_whiskers(x, y,
xlabel, ylabel, title)
return Plot(plt_ref) | Plots the data in `x` on the X axis and the data in `y` on the Y axis
in a 2d box and whiskers plot, and returns the resulting Plot object.
The function expects x as an SArray of dtype str and y as an SArray of dtype int or float.
Parameters
----------
x : SArray
The data to plot on the X axis of the box and whiskers plot.
Must be an SArray with dtype string.
y : SArray
The data to plot on the Y axis of the box and whiskers plot.
Must be numeric (int/float) and must be the same length as `x`.
xlabel : str (optional)
The text label for the X axis. Defaults to "X".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Y".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class: Plot object that is the box and whiskers plot.
Examples
--------
Make a box and whiskers plot.
>>> bp = turicreate.visualization.box_plot(tc.SArray(['a','b','c','a','a']),tc.SArray([4.0,3.25,2.1,2.0,1.0])) |
8,423 | def set_tolerance(self, tolerance):
cairo.cairo_set_tolerance(self._pointer, tolerance)
self._check_status() | Sets the tolerance used when converting paths into trapezoids.
Curved segments of the path will be subdivided
until the maximum deviation between the original path
and the polygonal approximation is less than tolerance.
The default value is 0.1.
A larger value will give better performance,
a smaller value, better appearance.
(Reducing the value from the default value of 0.1
is unlikely to improve appearance significantly.)
The accuracy of paths within Cairo is limited
by the precision of its internal arithmetic,
and the prescribed tolerance is restricted
to the smallest representable internal value.
:type tolerance: float
:param tolerance: The tolerance, in device units (typically pixels) |
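A minimal usage sketch, assuming ``ctx`` is an existing cairocffi ``Context`` (not shown above): trade a little curve accuracy for rendering speed.
ctx.set_tolerance(0.5)  # coarser than the 0.1 default: faster, slightly more faceted curves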
8,424 | def create_device_enrollment(self, enrollment_identity, **kwargs):
kwargs[] = True
if kwargs.get('asynchronous'):
return self.create_device_enrollment_with_http_info(enrollment_identity, **kwargs)
else:
(data) = self.create_device_enrollment_with_http_info(enrollment_identity, **kwargs)
return data | Place an enrollment claim for one or several devices. # noqa: E501
When the device connects to the bootstrap server and provides the enrollment ID, it will be assigned to your account. <br> **Example usage:** ``` curl -X POST \\ -H 'Authorization: Bearer <valid access token>' \\ -H 'content-type: application/json' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments \\ -d '{\"enrollment_identity\": \"A-35:e7:72:8a:07:50:3b:3d:75:96:57:52:72:41:0d:78:cc:c6:e5:53:48:c6:65:58:5b:fa:af:4d:2d:73:95:c5\"}' ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_device_enrollment(enrollment_identity, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param EnrollmentId enrollment_identity: (required)
:return: EnrollmentIdentity
If the method is called asynchronously,
returns the request thread. |
8,425 | def workflow_get_details(object_id, input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest('/%s/getDetails' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /workflow-xxxx/getDetails API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Details-and-Links#API-method%3A-%2Fclass-xxxx%2FgetDetails |
8,426 | def cleanup():
for install_dir in linters.INSTALL_DIRS:
try:
shutil.rmtree(install_dir, ignore_errors=True)
except Exception:
print(
"{0}\nFailed to delete {1}".format(
traceback.format_exc(), install_dir
)
)
sys.stdout.flush() | Delete standard installation directories. |
8,427 | def strip_lastharaka(text):
if text:
if is_vocalized(text):
return re.sub(LASTHARAKA_PATTERN, u'', text)
return text | Strip the last Haraka from arabic word except Shadda.
The striped marks are :
- FATHA, DAMMA, KASRA
- SUKUN
- FATHATAN, DAMMATAN, KASRATAN
@param text: arabic text.
@type text: unicode.
@return: return a striped text.
@rtype: unicode. |
8,428 | def _get_table_cells(table):
sent_map = defaultdict(list)
for sent in table.sentences:
if sent.is_tabular():
sent_map[sent.cell].append(sent)
return sent_map | Helper function with caching for table cells and the cells' sentences.
This function significantly improves the speed of `get_row_ngrams`
primarily by reducing the number of queries that are made (which were
previously the bottleneck). Rather than taking a single mention, then its
sentence, then its table, then all the cells in the table, then all the
sentences in each cell, and performing operations on that series of
queries, this performs a single query for all the sentences in a table and
returns all of the cells and the cells sentences directly.
:param table: the Table object to cache.
:return: an iterator of (Cell, [Sentence._asdict(), ...]) tuples. |
8,429 | def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args) | Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool |
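A hypothetical usage sketch of the hook method above; the hook class name, connection id, bucket and key are assumptions.
hook = S3Hook(aws_conn_id='aws_default')
hook.load_bytes(b'hello world', key='greeting.txt', bucket_name='my-bucket', replace=True)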
8,430 | def from_string(s):
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(u(s), keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass
return token | Deserializes a token from a string like one returned by
`to_string()`. |
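A doctest-style sketch, assuming the standard OAuth 1.0 parameter names (oauth_token, oauth_token_secret) filled in above.
>>> t = Token.from_string('oauth_token=abc&oauth_token_secret=xyz')
>>> (t.key, t.secret)
('abc', 'xyz')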
8,431 | def get_ip_address_list(list_name):
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "get_policy_ip_addresses",
"params": [list_name, 0, 256]}
response = __proxy__[](payload, False)
return _convert_to_list(response, ) | Retrieves a specific IP address list.
list_name(str): The name of the specific policy IP address list to retrieve.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.get_ip_address_list MyIPAddressList |
8,432 | def _get_or_insert_async(*args, **kwds):
from . import tasklets
cls, name = args
get_arg = cls.__get_arg
app = get_arg(kwds, 'app')
namespace = get_arg(kwds, 'namespace')
parent = get_arg(kwds, 'parent')
context_options = get_arg(kwds, 'context_options')
if not isinstance(name, basestring):
raise TypeError('name must be a string; received %r' % name)
elif not name:
raise ValueError('name cannot be an empty string.')
key = Key(cls, name, app=app, namespace=namespace, parent=parent)
@tasklets.tasklet
def internal_tasklet():
@tasklets.tasklet
def txn():
ent = yield key.get_async(options=context_options)
if ent is None:
ent = cls(**kwds)
ent._key = key
yield ent.put_async(options=context_options)
raise tasklets.Return(ent)
if in_transaction():
ent = yield txn()
else:
ent = yield key.get_async(options=context_options)
if ent is None:
ent = yield transaction_async(txn)
raise tasklets.Return(ent)
return internal_tasklet() | Transactionally retrieves an existing entity or creates a new one.
This is the asynchronous version of Model._get_or_insert(). |
8,433 | def __get_container_path(self, host_path):
libname = os.path.split(host_path)[1]
return os.path.join(_container_lib_location, libname) | A simple helper function to determine the path of a host library
inside the container
:param host_path: The path of the library on the host
:type host_path: str |
8,434 | def exec(self, *command_tokens, command_context=None, **command_env):
if self.adapter().match(command_context, **command_env) is False:
cmd = WCommandProto.join_tokens(*command_tokens)
spec = self.adapter().specification()
if spec is not None:
spec = [x.context_name() for x in spec]
spec.reverse()
spec = .join(spec)
raise RuntimeError( % (cmd, spec))
command_tokens = self.adapter().adapt(*command_tokens, command_context=command_context, **command_env)
return self.original_command().exec(*command_tokens, command_context=command_context, **command_env) | Execute command
:param command_tokens: command tokens to execute
:param command_context: command context
:param command_env: command environment
:return: WCommandResultProto |
8,435 | def main():
args = get_args()
args.start = date_parser.parse(args.start)
args.end = date_parser.parse(args.end)
args.step = timedelta(args.step)
config = Config(args.config)
times = [args.start + i * args.step for i in range(int((args.end - args.start) / args.step))]
for i, time in enumerate(times):
make_plot(time, config, args.step) | process the main task |
8,436 | def recv(self, size = None):
size = size if size is not None else 1500
return os.read(self.fd, size) | Receive a buffer. The default size is 1500, the
classical MTU. |
8,437 | def execute_update(self, update, safe=False):
assert len(update.update_data) > 0
self.queue.append(UpdateOp(self.transaction_id, self, update.query.type, safe, update))
if self.autoflush:
return self.flush() | Execute an update expression. Should generally only be called implicitly. |
8,438 | def series(self, x: str, y: list, title: str = '') -> object:
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts() + ";\n"
if len(title) > 0:
code += + title +
if isinstance(y, list):
num = len(y)
else:
num = 1
y = [y]
for i in range(num):
code += "\tseries x=" + x + " y=" + str(y[i]) + ";\n"
code += +
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll | This method plots a series of x,y coordinates. You can provide a list of y columns for multiple line plots.
:param x: the x axis variable; generally a time or continuous variable.
:param y: the y axis variable(s), you can specify a single column or a list of columns
:param title: an optional Title for the chart
:return: graph object |
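A hypothetical call on a SASdata object (the ``cars`` table and column names are assumptions): plot two y columns against one x column.
cars.series(x='date', y=['sales', 'returns'], title='Monthly sales vs. returns')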
8,439 | def add_volume(self, hostpath, contpath, options=None):
if options is None:
options = []
self.volumes.append((hostpath, contpath, options)) | Add a volume (bind-mount) to the docker run invocation |
8,440 | def forwards(apps, schema_editor):
starts = timeutils.round_datetime(
when=timezone.now(),
precision=timedelta(days=1),
rounding=timeutils.ROUND_DOWN)
ends = starts + appsettings.DEFAULT_ENDS_DELTA
recurrence_rules = dict(
RecurrenceRule.objects.values_list(, ))
daily = recurrence_rules[]
weekdays = recurrence_rules[]
weekends = recurrence_rules[]
weekly = recurrence_rules[]
monthly = recurrence_rules[]
yearly = recurrence_rules[]
daily_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=9),
ends=ends + timedelta(hours=9),
recurrence_rule=daily,
)
weekday_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=11),
ends=ends + timedelta(hours=11),
recurrence_rule=weekdays,
)
weekend_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=13),
ends=ends + timedelta(hours=13),
recurrence_rule=weekends,
)
weekly_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=15),
ends=ends + timedelta(hours=15),
recurrence_rule=weekly,
)
monthly_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=17),
ends=ends + timedelta(hours=17),
recurrence_rule=monthly,
)
yearly_event = G(
EventBase,
title=,
starts=starts + timedelta(hours=19),
ends=ends + timedelta(hours=19),
recurrence_rule=yearly,
) | Create sample events. |
8,441 | def PopState(self, **_):
try:
self.state = self.state_stack.pop()
if self.verbose:
logging.debug("Returned state to %s", self.state)
return self.state
except IndexError:
self.Error("Tried to pop the state but failed - possible recursion error") | Pop the previous state from the stack. |
8,442 | def read_annotations(path_or_file, separator='\t', reset=True):
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith():
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split()
name = _name.replace(, )
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations[] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations[] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations | Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations. |
8,443 | def get_data(self) -> bytes:
data = {
"_class_name": self.__class__.__name__,
"version": 1,
"segments_bin": SegmentSequence([self.command_seg, self.tan_request]).render_bytes(),
"resume_method": self.resume_method,
"tan_request_structured": self.tan_request_structured,
}
return compress_datablob(DATA_BLOB_MAGIC_RETRY, 1, data) | Return a compressed datablob representing this object.
To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`. |
8,444 | def get_remote(self, key, default=None, scope=None):
return self.conversation(scope).get_remote(key, default) | Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`. |
8,445 | def report(self):
if self.logger:
self.logger.info("accessed parameters:")
for key in self.used_parameters:
self.logger.info(" - %s %s" % (key, "(undefined)" if key in self.undefined_parameters else "")) | Report usage of training parameters. |
8,446 | def _add_tag(self, tag):
tags = self.data.get(, None)
if tags:
if tag in [x[] for x in tags]:
return False
else:
tags = list()
tags.append({: tag})
self.data[] = tags
return True | Add a tag
Args:
tag (str): Tag to add
Returns:
bool: True if tag added or False if tag already present |
8,447 | def parse_time(val, fmt=None):
if isinstance(val, time):
return val
else:
return parse_datetime(val, fmt).time() | Returns a time object parsed from :val:.
:param val: a string to be parsed as a time
:param fmt: a format string, a tuple of format strings, or None. If None a built in list of format
strings will be used to try to parse the time. |
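A short doctest-style sketch with an explicit format string, assuming ``parse_datetime`` applies the given ``fmt`` via ``strptime`` (the built-in format list is not shown above).
>>> parse_time('13:45', fmt='%H:%M')
datetime.time(13, 45)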
8,448 | def speed(self, factor, use_semitones=False):
self.command.append("speed")
self.command.append(factor if not use_semitones else str(factor) + "c")
return self | speed takes 2 parameters: factor and use-semitones (True or False).
When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. The same result is achieved with factor = 1200 and use semitones = True. |
8,449 | def close(self):
os.close(self._fd)
self._fd = -1
self._addr = -1
self._pec = 0 | close()
Disconnects the object from the bus. |
8,450 | def from_bam(pysam_samfile, loci, normalized_contig_names=True):
loci = [to_locus(obj) for obj in loci]
close_on_completion = False
if typechecks.is_string(pysam_samfile):
pysam_samfile = Samfile(pysam_samfile)
close_on_completion = True
try:
if normalized_contig_names:
chromosome_name_map = {}
for name in pysam_samfile.references:
normalized = pyensembl.locus.normalize_chromosome(name)
chromosome_name_map[normalized] = name
chromosome_name_map[name] = name
else:
chromosome_name_map = None
result = PileupCollection({})
locus_iterator = itertools.chain.from_iterable(
(Locus.from_interbase_coordinates(locus_interval.contig, pos)
for pos
in locus_interval.positions)
for locus_interval in sorted(loci))
for locus in locus_iterator:
result.pileups[locus] = Pileup(locus, [])
if normalized_contig_names:
try:
chromosome = chromosome_name_map[locus.contig]
except KeyError:
logging.warn("No such contig in bam: %s" % locus.contig)
continue
else:
chromosome = locus.contig
columns = pysam_samfile.pileup(
chromosome,
locus.position,
locus.position + 1,
truncate=True,
stepper="nofilter")
try:
column = next(columns)
except StopIteration:
continue
pileups = column.pileups
assert list(columns) == []
for pileup_read in pileups:
if not pileup_read.is_refskip:
element = PileupElement.from_pysam_alignment(
locus, pileup_read)
result.pileups[locus].append(element)
return result
finally:
if close_on_completion:
pysam_samfile.close() | Create a PileupCollection for a set of loci from a BAM file.
Parameters
----------
pysam_samfile : `pysam.Samfile` instance, or filename string
to a BAM file. The BAM file must be indexed.
loci : list of Locus instances
Loci to collect pileups for.
normalized_contig_names : whether the contig names have been normalized
(e.g. pyensembl removes the 'chr' prefix). Set to true to
de-normalize the names when querying the BAM file.
Returns
----------
PileupCollection instance containing pileups for the specified loci.
All alignments in the BAM file are included (e.g. duplicate reads,
secondary alignments, etc.). See `PileupCollection.filter` if these
need to be removed. |
8,451 | def get_connection_by_node(self, node):
self._checkpid()
self.nodes.set_node_name(node)
try:
connection = self._available_connections.get(node["name"], []).pop()
except IndexError:
connection = self.make_connection(node)
self._in_use_connections.setdefault(node["name"], set()).add(connection)
return connection | get a connection by node |
8,452 | def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
return cast(T, self._val) if self._is_ok else op(cast(E, self._val)) | Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10 |
8,453 | def _execute(self, stmt, *values):
c = self._cursor()
try:
return c.execute(stmt, values).fetchone()
finally:
c.close() | Gets a cursor, executes `stmt` and closes the cursor,
fetching one row afterwards and returning its result. |
8,454 | def optimize(self, commit=True, waitFlush=None, waitSearcher=None, maxSegments=None, handler='update'):
if maxSegments:
msg = '<optimize maxSegments="%d" />' % maxSegments
else:
msg = '<optimize />'
return self._update(msg, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher, handler=handler) | Tells Solr to streamline the number of segments used, essentially a
defragmentation operation.
Optionally accepts ``maxSegments``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.optimize() |
8,455 | def get_broadcast_events(cls, script):
events = Counter()
for name, _, block in cls.iter_blocks(script):
if 'broadcast' in name:
if isinstance(block.args[0], kurt.Block):
events[True] += 1
else:
events[block.args[0].lower()] += 1
return events | Return a Counter of event-names that were broadcast.
The Count will contain the key True if any of the broadcast blocks
contain a parameter that is a variable. |
8,456 | def mcmc_CH(self, walkerRatio, n_run, n_burn, mean_start, sigma_start, threadCount=1, init_pos=None, mpi=False):
lowerLimit, upperLimit = self.lower_limit, self.upper_limit
mean_start = np.maximum(lowerLimit, mean_start)
mean_start = np.minimum(upperLimit, mean_start)
low_start = mean_start - sigma_start
high_start = mean_start + sigma_start
low_start = np.maximum(lowerLimit, low_start)
high_start = np.minimum(upperLimit, high_start)
sigma_start = (high_start - low_start) / 2
mean_start = (high_start + low_start) / 2
params = np.array([mean_start, lowerLimit, upperLimit, sigma_start]).T
chain = LikelihoodComputationChain(
min=lowerLimit,
max=upperLimit)
temp_dir = tempfile.mkdtemp("Hammer")
file_prefix = os.path.join(temp_dir, "logs")
chain.addLikelihoodModule(self.chain)
chain.setup()
store = InMemoryStorageUtil()
if mpi is True:
sampler = MpiCosmoHammerSampler(
params=params,
likelihoodComputationChain=chain,
filePrefix=file_prefix,
walkersRatio=walkerRatio,
burninIterations=n_burn,
sampleIterations=n_run,
threadCount=1,
initPositionGenerator=init_pos,
storageUtil=store)
else:
sampler = CosmoHammerSampler(
params=params,
likelihoodComputationChain=chain,
filePrefix=file_prefix,
walkersRatio=walkerRatio,
burninIterations=n_burn,
sampleIterations=n_run,
threadCount=threadCount,
initPositionGenerator=init_pos,
storageUtil=store)
time_start = time.time()
if sampler.isMaster():
print()
print(, len(mean_start)*walkerRatio)
print(, n_burn)
print(, n_run)
sampler.startSampling()
if sampler.isMaster():
time_end = time.time()
print(time_end - time_start, )
try:
shutil.rmtree(temp_dir)
except Exception as ex:
print(ex, )
pass
return store.samples, store.prob | runs mcmc on the parameter space given parameter bounds with CosmoHammerSampler
returns the chain |
8,457 | def sample(self, num_samples=1000, hmc_iters=20):
params = np.empty((num_samples,self.p.size))
for i in range(num_samples):
self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
H_old = self._computeH()
theta_old = self.model.optimizer_array.copy()
params[i] = self.model.unfixed_param_array
self._update(hmc_iters)
H_new = self._computeH()
if H_old>H_new:
k = 1.
else:
k = np.exp(H_old-H_new)
if np.random.rand()<k:
params[i] = self.model.unfixed_param_array
else:
self.model.optimizer_array = theta_old
return params | Sample the (unfixed) model parameters.
:param num_samples: the number of samples to draw (1000 by default)
:type num_samples: int
:param hmc_iters: the number of leap-frog iterations (20 by default)
:type hmc_iters: int
:return: the list of parameters samples with the size N x P (N - the number of samples, P - the number of parameters to sample)
:rtype: numpy.ndarray |
8,458 | def less_strict_bool(x):
if x is None:
return False
elif x is True or x is False:
return x
else:
return strict_bool(x) | Idempotent and None-safe version of strict_bool. |
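Behaviour summary as doctests, grounded directly in the branches above (the string case delegates to ``strict_bool``, whose rules are not shown here).
>>> less_strict_bool(None)
False
>>> less_strict_bool(True)
True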
8,459 | def invoke(self, script_hash, params, **kwargs):
contract_params = encode_invocation_params(params)
raw_result = self._call(
JSONRPCMethods.INVOKE.value, [script_hash, contract_params, ], **kwargs)
return decode_invocation_result(raw_result) | Invokes a contract with given parameters and returns the result.
It should be noted that the name of the function invoked in the contract should be part of the
parameters.
:param script_hash: contract script hash
:param params: list of parameters to be passed to the smart contract
:type script_hash: str
:type params: list
:return: result of the invocation
:rtype: dictionary |
8,460 | def send(self, message):
for event in message.events:
self.events.append(event)
reply = riemann_client.riemann_pb2.Msg()
reply.ok = True
return reply | Adds a message to the list, returning a fake 'ok' response
:returns: A response message with ``ok = True`` |
8,461 | def get_resource_component_children(self, resource_component_id):
resource_type = self.resource_type(resource_component_id)
return self.get_resource_component_and_children(
resource_component_id, resource_type
) | Given a resource component, fetches detailed metadata for it and all of its children.
This is implemented using ArchivesSpaceClient.get_resource_component_children and uses its default options when fetching children.
:param string resource_component_id: The URL of the resource component from which to fetch metadata. |
8,462 | def buffered_read(fh, lock, offsets, bytecounts, buffersize=None):
if buffersize is None:
buffersize = 2**26
length = len(offsets)
i = 0
while i < length:
data = []
with lock:
size = 0
while size < buffersize and i < length:
fh.seek(offsets[i])
bytecount = bytecounts[i]
data.append(fh.read(bytecount))
size += bytecount
i += 1
for segment in data:
yield segment | Return iterator over segments read from file. |
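A hypothetical calling sketch (file name, offsets and byte counts are made up); the generator seeks to each offset, reads the requested byte count, and yields segments in buffered batches.
import threading
lock = threading.Lock()
with open('data.bin', 'rb') as fh:
    for segment in buffered_read(fh, lock, offsets=[0, 100, 250], bytecounts=[100, 150, 50]):
        print(len(segment))  # each yielded item is one raw segment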
8,463 | def train_model(
self,
L_train,
Y_dev=None,
deps=[],
class_balance=None,
log_writer=None,
**kwargs,
):
self.config = recursive_merge_dicts(self.config, kwargs, misses="ignore")
train_config = self.config["train_config"]
if log_writer is not None:
raise NotImplementedError("Logging for LabelModel.")
l2 = train_config.get("l2", 0)
self._set_class_balance(class_balance, Y_dev)
self._set_constants(L_train)
self._set_dependencies(deps)
self._check_L(L_train)
self.inv_form = len(self.deps) > 0
dataset = MetalDataset([0], [0])
train_loader = DataLoader(dataset)
if self.inv_form:
if self.config["verbose"]:
print("Computing O^{-1}...")
self._generate_O_inv(L_train)
self._init_params()
if self.config["verbose"]:
print("Estimating Z...")
self._train_model(train_loader, self.loss_inv_Z)
self.Q = torch.from_numpy(self.get_Q()).float()
if self.config["verbose"]:
print("Estimating \mu...")
self._train_model(train_loader, partial(self.loss_inv_mu, l2=l2))
else:
if self.config["verbose"]:
print("Computing O...")
self._generate_O(L_train)
self._init_params()
if self.config["verbose"]:
print("Estimating \mu...")
self._train_model(train_loader, partial(self.loss_mu, l2=l2)) | Train the model (i.e. estimate mu) in one of two ways, depending on
whether source dependencies are provided or not:
Args:
L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
corresponding to labels from supervision sources on the
training set
Y_dev: Target labels for the dev set, for estimating class_balance
deps: (list of tuples) known dependencies between supervision
sources. If not provided, sources are assumed to be independent.
TODO: add automatic dependency-learning code
class_balance: (np.array) each class's percentage of the population
(1) No dependencies (conditionally independent sources): Estimate mu
subject to constraints:
(1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
is the block of entries corresponding to sources i,j
(1b) np.sum( mu P, 1 ) = diag(O)
(2) Source dependencies:
- First, estimate Z subject to the inverse form
constraint:
(2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
- Then, compute Q = mu P mu.T
- Finally, estimate mu subject to mu P mu.T = Q and (1b) |
8,464 | def config_start(self):
_LOGGER.info("Config start")
success, _ = self._make_request(
SERVICE_DEVICE_CONFIG, "ConfigurationStarted", {"NewSessionID": SESSION_ID})
self.config_started = success
return success | Start a configuration session.
For managing router admin functionality (ie allowing/blocking devices) |
8,465 | def change_host_check_timeperiod(self, host, timeperiod):
host.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value
host.check_period = timeperiod
self.send_an_element(host.get_update_status_brok()) | Modify host check timeperiod
Format of the line that triggers function call::
CHANGE_HOST_CHECK_TIMEPERIOD;<host_name>;<timeperiod>
:param host: host to modify check timeperiod
:type host: alignak.objects.host.Host
:param timeperiod: timeperiod object
:type timeperiod: alignak.objects.timeperiod.Timeperiod
:return: None |
8,466 | def request_time_facet(field, time_filter, time_gap, time_limit=100):
start, end = parse_datetime_range(time_filter)
key_range_start = "f.{0}.facet.range.start".format(field)
key_range_end = "f.{0}.facet.range.end".format(field)
key_range_gap = "f.{0}.facet.range.gap".format(field)
key_range_mincount = "f.{0}.facet.mincount".format(field)
if time_gap:
gap = gap_to_sorl(time_gap)
else:
gap = compute_gap(start, end, time_limit)
value_range_start = start.get("parsed_datetime")
if start.get("is_common_era"):
value_range_start = start.get("parsed_datetime").isoformat().replace("+00:00", "") + "Z"
value_range_end = start.get("parsed_datetime")
if end.get("is_common_era"):
value_range_end = end.get("parsed_datetime").isoformat().replace("+00:00", "") + "Z"
value_range_gap = gap
params = {
: field,
key_range_start: value_range_start,
key_range_end: value_range_end,
key_range_gap: value_range_gap,
key_range_mincount: 1
}
return params | time facet query builder
:param field: map the query to this field.
:param time_limit: Non-0 triggers time/date range faceting. This value is the maximum number of time ranges to
return when a.time.gap is unspecified. This is a soft maximum; less will usually be returned.
A suggested value is 100.
Note that a.time.gap effectively ignores this value.
See Solr docs for more details on the query/response format.
:param time_filter: From what time range to divide by a.time.gap into intervals.
Defaults to q.time and otherwise 90 days.
:param time_gap: The consecutive time interval/gap for each time range. Ignores a.time.limit.
The format is based on a subset of the ISO-8601 duration format
:return: facet.range=manufacturedate_dt&f.manufacturedate_dt.facet.range.start=2006-02-11T15:26:37Z&f.
manufacturedate_dt.facet.range.end=2006-02-14T15:26:37Z&f.manufacturedate_dt.facet.range.gap=+1DAY |
8,467 | def _parse_myinfo(client, command, actor, args):
_, server, version, usermodes, channelmodes = args.split(None, 5)[:5]
s = client.server
s.host = server
s.version = version
s.user_modes = set(usermodes)
s.channel_modes = set(channelmodes) | Parse MYINFO and update the Host object. |
8,468 | def validate_reaction(self):
if self.reaction not in self._reaction_valid_values:
raise ValueError("reaction should be one of: {valid}".format(
valid=", ".join(self._reaction_valid_values)
)) | Ensure reaction is of a certain type.
Mainly for future expansion. |
8,469 | def call(self, inpt):
if inpt is Manager.NONE_INPUT:
return False
argument_instance = self.argument(inpt)
if not argument_instance.applies:
return False
application = self.__apply(argument_instance, inpt)
if self.negative:
application = not application
return application | Returns if the condition applies to the ``inpt``.
If the class ``inpt`` is an instance of is not the same class as the
condition's own ``argument``, then ``False`` is returned. This also
applies to the ``NONE`` input.
Otherwise, ``argument`` is called, with ``inpt`` as the instance and
the value is compared to the ``operator`` and the value is returned. If
the condition is ``negative``, then ``not`` the value is returned.
Keyword Arguments:
inpt -- An instance of the ``Input`` class. |
8,470 | def get_conn(self):
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema) | Returns a connection object |
8,471 | def validate_brain_requirements(connection, remote_dbs, requirements):
for database in requirements:
assert (database in remote_dbs), "database {} must exist".format(database)
remote_tables = frozenset(rethinkdb.db(database).table_list().run(connection))
for table in requirements[database]:
assert (table in remote_tables), "{} must exist in {}".format(table, database)
return True | validates the rethinkdb has the 'correct' databases and tables
should get remote_dbs from brain.connection.validate_get_dbs
:param connection: <rethinkdb.net.DefaultConnection>
:param remote_dbs: <set> database names present in remote database
:param requirements: <dict> example(brain.connection.SELF_TEST)
:return: <bool> True
:raises: AssertionError or Reql*Error |
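A sketch of the expected ``requirements`` shape (database and table names here are made up, not taken from ``brain.connection.SELF_TEST``); ``conn`` must be an open rethinkdb connection and ``remote_dbs`` the set of databases present on it.
requirements = {'Brain': ['Targets', 'Jobs'], 'Audit': ['Events']}
validate_brain_requirements(conn, remote_dbs={'Brain', 'Audit'}, requirements=requirements)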
8,472 | def _get_top_of_rupture_depth_term(self, C, imt, rup):
if rup.ztor >= 20.0:
return C[]
else:
return C[] * rup.ztor / 20.0 | Compute and return top of rupture depth term. See paragraph
'Depth-to-Top of Rupture Model', page 1042. |
8,473 | def dropping(n):
if n < 0:
raise ValueError("Cannot drop fewer than zero ({}) items".format(n))
def dropping_transducer(reducer):
return Dropping(reducer, n)
return dropping_transducer | Create a transducer which drops the first n items |
8,474 | def add_filter(self, filter_or_string, *args, **kwargs):
self.filters.append(build_filter(filter_or_string, *args, **kwargs))
return self | Appends a filter. |
8,475 | def exit_with_error(msg='', details=None, code=-1, *args, **kwargs):
error(msg, details=details, *args, **kwargs)
sys.exit(code) | Exit with error |
8,476 | def get(key, default=None):
**["key1", "key2"]
store = load()
if isinstance(key, six.string_types):
return store.get(key, default)
elif default is None:
return [store[k] for k in key if k in store]
else:
return [store.get(k, default) for k in key] | Get a (list of) value(s) from the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.get key
salt '*' data.get '["key1", "key2"]' |
8,477 | def setContentsMargins(self, left, top, right, bottom):
self._margins = (left, top, right, bottom)
self.adjustTitleFont() | Sets the contents margins for this node to the inputed values.
:param left | <int>
top | <int>
right | <int>
bottom | <int> |
8,478 | def get_url(self, resource, params=None):
pattern = r
resource = re.sub(pattern, lambda t: str(params.get(t.group(1), )), resource)
parts = (self.endpoint, , resource)
return .join(map(lambda x: str(x).strip(), parts)) | Generate url for request |
8,479 | def local_histogram_equalization(data, mask_to_equalize, valid_data_mask=None, number_of_bins=1000,
std_mult_cutoff=3.0,
do_zerotoone_normalization=True,
local_radius_px=300,
clip_limit=60.0,
slope_limit=3.0,
do_log_scale=True,
log_offset=0.00001,
out=None
):
out = out if out is not None else np.zeros_like(data)
cumulative_dist_function, temp_bins = None, None
if mask_valid_data_in_tile.any():
temp_valid_data = data[min_row:max_row, min_col:max_col][
mask_valid_data_in_tile]
temp_valid_data = temp_valid_data[
temp_valid_data >= 0
]
if std_mult_cutoff is not None:
avg = np.mean(temp_valid_data)
std = np.std(temp_valid_data)
concervative_mask = (
temp_valid_data < (avg + std * std_mult_cutoff)) & (
temp_valid_data > (avg - std * std_mult_cutoff))
temp_valid_data = temp_valid_data[concervative_mask]
if do_log_scale:
temp_valid_data = np.log(temp_valid_data + log_offset)
if temp_valid_data.size > 0:
cumulative_dist_function, temp_bins = _histogram_equalization_helper(
temp_valid_data,
number_of_bins,
clip_limit=clip_limit,
slope_limit=slope_limit)
all_cumulative_dist_functions[num_row_tile].append(
cumulative_dist_function)
all_bin_information[num_row_tile].append(temp_bins)
tile_weights = _calculate_weights(tile_size)
for num_row_tile in range(row_tiles):
for num_col_tile in range(col_tiles):
min_row = num_row_tile * tile_size
max_row = min_row + tile_size
min_col = num_col_tile * tile_size
max_col = min_col + tile_size
temp_all_data = data[min_row:max_row, min_col:max_col].copy()
temp_mask_to_equalize = mask_to_equalize[min_row:max_row, min_col:
max_col]
temp_all_valid_data_mask = valid_data_mask[min_row:max_row,
min_col:max_col]
if temp_mask_to_equalize.any():
if do_log_scale:
temp_all_data[temp_all_valid_data_mask] = np.log(
temp_all_data[temp_all_valid_data_mask] + log_offset)
temp_data_to_equalize = temp_all_data[temp_mask_to_equalize]
temp_all_valid_data = temp_all_data[temp_all_valid_data_mask]
temp_sum = np.zeros_like(temp_data_to_equalize)
unused_weight = np.zeros(temp_data_to_equalize.shape,
dtype=tile_weights.dtype)
for weight_row in range(3):
for weight_col in range(3):
temp_equalized_data = np.interp(
temp_all_valid_data, all_bin_information[
calculated_row][calculated_col][:-1],
all_cumulative_dist_functions[calculated_row][
calculated_col])
temp_equalized_data = temp_equalized_data[np.where(
temp_mask_to_equalize[
temp_all_valid_data_mask])]
out[min_row:max_row, min_col:max_col][
temp_mask_to_equalize] = temp_sum
if do_zerotoone_normalization:
_linear_normalization_from_0to1(out, mask_to_equalize, number_of_bins)
return out | Equalize the provided data (in the mask_to_equalize) using adaptive histogram equalization.
tiles of width/height (2 * local_radius_px + 1) will be calculated and results for each pixel will be bilinerarly
interpolated from the nearest 4 tiles when pixels fall near the edge of the image (there is no adjacent tile) the
resultant interpolated sum from the available tiles will be multipled to account for the weight of any missing
tiles::
pixel total interpolated value = pixel available interpolated value / (1 - missing interpolation weight)
if ``do_zerotoone_normalization`` is True the data will be scaled so that all data in the mask_to_equalize falls
between 0 and 1; otherwise the data in mask_to_equalize will all fall between 0 and number_of_bins
Returns:
The equalized data |
8,480 | def parse_inventory_category(name, info, countable=True):
raw = info["data"][1:]
cur = 0
if countable:
count = struct.unpack("B", raw[cur])[0]
cur += 1
else:
count = 0
discarded = 0
entries = []
while cur < len(raw):
read, cpu = categories[name]["parser"](raw[cur:])
cur = cur + read
if cpu is None:
discarded += 1
continue
if not countable:
count += 1
cpu["index"] = count
entries.append(cpu)
if cur != len(raw):
raise Exception
if count - discarded != len(entries):
raise Exception
return entries | Parses every entry in an inventory category (CPU, memory, PCI, drives,
etc).
Expects the first byte to be a count of the number of entries, followed
by a list of elements to be parsed by a dedicated parser (below).
:param name: the name of the parameter (e.g.: "cpu")
:param info: a list of integers with raw data read from an IPMI requests
:param countable: whether the data have an entries count field
:returns: dict -- a list of entries in the category. |
8,481 | def as_cnpjcpf(numero):
if is_cnpj(numero):
return as_cnpj(numero)
elif is_cpf(numero):
return as_cpf(numero)
return numero | Formats a CNPJ or CPF number. If the number is not a valid CNPJ or CPF,
it simply returns the argument without any modification. |
8,482 | def loglike(self, y, f):
y, f = np.broadcast_arrays(y, f)
if self.tranfcn == 'exp':
g = np.exp(f)
logg = f
else:
g = softplus(f)
logg = np.log(g)
return y * logg - g - gammaln(y + 1) | Poisson log likelihood.
Parameters
----------
y: ndarray
array of integer targets
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
logp: ndarray
the log likelihood of each y given each f under this
likelihood. |
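A quick numeric sanity check of the formula above, independent of the class (uses only numpy and scipy): the value matches the log of the Poisson pmf at y=3 with rate 2.
>>> import numpy as np
>>> from scipy.special import gammaln
>>> y, g = 3, 2.0
>>> round(float(y * np.log(g) - g - gammaln(y + 1)), 4)
-1.7123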
8,483 | def return_item(self, item, priority):
conn = redis.StrictRedis(connection_pool=self.pool)
self._run_expiration(conn)
script = conn.register_script()
if priority is None: priority = "None"
result = script(keys=[self._key_available(), self._key_expiration(),
self._key_priorities(), self._key_workers(),
self._key_reservations(item)],
args=[item, priority, self._get_worker_id(conn)])
if result == -1: raise LostLease(item)
return | Complete work on an item from ``check_out_item()``.
If this instance no longer owns ``item``, raise ``LostLease``.
If ``priority`` is None, the item is removed from the queue;
otherwise it is re-added with the specified priority. Any
locked items associated with this item are unlocked. |
8,484 | def get_description_by_type(self, type_p):
if not isinstance(type_p, VirtualSystemDescriptionType):
raise TypeError("type_p can only be an instance of type VirtualSystemDescriptionType")
(types, refs, ovf_values, v_box_values, extra_config_values) = self._call("getDescriptionByType",
in_p=[type_p])
types = [VirtualSystemDescriptionType(a) for a in types]
return (types, refs, ovf_values, v_box_values, extra_config_values) | This is the same as :py:func:`get_description` except that you can specify which types
should be returned.
in type_p of type :class:`VirtualSystemDescriptionType`
out types of type :class:`VirtualSystemDescriptionType`
out refs of type str
out ovf_values of type str
out v_box_values of type str
out extra_config_values of type str |
8,485 | def __prepare_info_from_dicomdir_file(self, writedicomdirfile=True):
createdcmdir = True
dicomdirfile = os.path.join(self.dirpath, self.dicomdir_filename)
ftype =
if os.path.exists(dicomdirfile):
try:
dcmdirplus = misc.obj_from_file(dicomdirfile, ftype)
if dcmdirplus[] == __version__:
createdcmdir = False
dcmdir = dcmdirplus[]
except Exception:
logger.debug()
createdcmdir = True
if createdcmdir or self.force_create_dicomdir:
dcmdirplus = self._create_dicomdir_info()
dcmdir = dcmdirplus[]
if (writedicomdirfile) and len(dcmdir) > 0:
try:
misc.obj_to_file(dcmdirplus, dicomdirfile, ftype)
except:
logger.warning()
traceback.print_exc()
dcmdir = dcmdirplus[]
self.dcmdirplus = dcmdirplus
self.files_with_info = dcmdir
return dcmdir | Check if exists dicomdir file and load it or cerate it
dcmdir = get_dir(dirpath)
dcmdir: list with filenames, SeriesNumber and SliceLocation |
8,486 | def build_columns(self, X, term=-1, verbose=False):
if term == -1:
term = range(len(self._terms))
term = list(np.atleast_1d(term))
columns = []
for term_id in term:
columns.append(self._terms[term_id].build_columns(X, verbose=verbose))
return sp.sparse.hstack(columns, format=) | construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows |
8,487 | def append_from_dict(self, the_dict):
m = Measurement.from_dict(the_dict)
self.append(m) | Creates a ``measurement.Measurement`` object from the supplied dict
and then appends it to the buffer
:param the_dict: dict |
8,488 | def register(handler,
op,
safe_init = None,
at_start = None,
name = None,
at_stop = None,
static = False,
root = None,
replacement = None,
charset = DEFAULT_CHARSET,
content_type = None,
to_auth = False,
to_log = True):
ref_cmd = _NCMD
is_reg = False
if isinstance(name, re._pattern_type):
key = name.pattern
ref_cmd = _RCMD
is_reg = True
elif name:
key = name
replacement = None
else:
key = handler.__name__
name = handler.__name__
replacement = None
methods = []
if not isinstance(op, (list, tuple)):
op = [op.upper()]
for x in op:
x = x.upper()
if x not in _METHODS:
raise ValueError("unknown HTTP method: %r" % x)
if static and x not in ('GET', 'HEAD'):  # allowed methods inferred from the error message below
raise ValueError("Static must be GET, HEAD command")
methods.append(x)
if not methods:
raise ValueError("Missing HTTP method")
if static and not root:
raise ValueError("Missing root argument for static")
cmd = Command(name,
handler,
methods,
safe_init,
at_start,
at_stop,
static,
root,
replacement,
charset,
content_type,
to_auth,
to_log)
for method in methods:
if not is_reg:
mkey = "%s /%s" % (method, key)
else:
mkey = "%s %s" % (method, key)
if mkey in _COMMANDS:
raise ValueError("%s is already registred" % name)
_COMMANDS[mkey] = cmd
ref_cmd[mkey] = _COMMANDS[mkey] | Register a command
@handler: function to execute when the command is received
@op: http method(s)
@safe_init: called by the safe_init() function of this module
@at_start: called once just before the server starts
@at_stop: called once just before the server stops
@name: name of the command (if omitted, handler.__name__ is used)
@static: render static file
@root: root path
@replacement: rewrite path when name is regexp
@charset: charset
@content_type: content_type
@to_auth: use basic authentication if True
@to_log: log request if True
prototypes:
handler(args)
safe_init(options)
at_start(options)
at_stop() |
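A short registration sketch following the handler(args) prototype listed above (the handler body and option values are illustrative):
    def hello(args):                                   # matches the handler(args) prototype
        return "hello world"

    register(hello, 'GET', name='hello')               # exposed as "GET /hello"
    register(hello, ['GET', 'POST'], name='greet', to_log=False)   # same handler on two methods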
8,489 | def _get_focused_item(self):
focused_model = self._selection.focus
if not focused_model:
return None
return self.canvas.get_view_for_model(focused_model) | Returns the currently focused item |
8,490 | def imag(self, newimag):
if self.space.is_real:
raise ValueError()
self.tensor.imag = newimag | Set the imaginary part of this element to ``newimag``.
This method is invoked by ``x.imag = other``.
Parameters
----------
newimag : array-like or scalar
Values to be assigned to the imaginary part of this element.
Raises
------
ValueError
If the space is real, i.e., no imaginary part can be set. |
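Usage sketch, assuming an ODL-style complex tensor space (the `odl` import and space construction are assumptions; the snippet only shows the setter):
    import odl                              # assumed library providing the space
    space = odl.cn(3)                       # complex 3-vector space
    x = space.element([1 + 1j, 2, 3 - 2j])
    x.imag = [0, 5, 0]                      # invokes the setter shown above
    # On a real space such as odl.rn(3), the same assignment raises ValueError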
8,491 | def load(self, module_name):
module_name, path = self.lookup(module_name)
if path:
with open(path, 'rb') as f:  # binary mode inferred from the .decode() call below
return module_name, f.read().decode()
return None, None | Returns source code and normalized module id of the given module.
Only supports source code files encoded as UTF-8 |
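Illustrative call (a sketch; `loader` is a hypothetical instance of the class this method belongs to):
    name, source = loader.load('package.module')
    if source is None:
        print('module not found')
    else:
        print('loaded %s (%d characters)' % (name, len(source)))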
8,492 | def get_shards(self, *args, full_response=False):
resp = self.request(shards=args, full_response=full_response)
return resp | Get Shards |
8,493 | def _imm_new(cls):
imm = object.__new__(cls)
params = cls._pimms_immutable_data_[]
for (p,dat) in six.iteritems(params):
dat = dat[0]
if dat: object.__setattr__(imm, p, dat[0])
_imm_clear(imm)
dd = object.__getattribute__(imm, )
dd[] = True
return imm | All immutable new classes use a hack to make sure the post-init cleanup occurs. |
8,494 | def fso_listdir(self, path):
path = self.deref(path)
if not stat.S_ISDIR(self._stat(path).st_mode):
raise OSError(20, 'Not a directory', path)  # errno 20 is ENOTDIR
try:
ret = self.originals[](path)
except Exception:
ret = []
for entry in self.entries.values():
if not entry.path.startswith(path + '/'):  # '/' separator inferred from the len(path) + 1 slice below
continue
subpath = entry.path[len(path) + 1:]
if '/' in subpath:  # only direct children belong in the listing
continue
if entry.mode is None:
if subpath in ret:
ret.remove(subpath)
else:
if subpath not in ret:
ret.append(subpath)
return ret | overlays os.listdir() |
8,495 | def unsubscribe(self, subscriber: ) -> None:
for i, _s in enumerate(self._subscriptions):
if _s is subscriber:
self._subscriptions.pop(i)
return
raise SubscriptionError() | Unsubscribe the given subscriber
:param subscriber: subscriber to unsubscribe
:raises SubscriptionError: if subscriber is not subscribed (anymore) |
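A sketch of the subscribe/unsubscribe cycle (the publisher object, `MySubscriber`, and the `subscribe` companion method are assumptions):
    sub = MySubscriber()          # hypothetical subscriber object
    publisher.subscribe(sub)      # assumed companion method
    # ... receive events ...
    publisher.unsubscribe(sub)    # removes exactly this subscriber
    # calling unsubscribe(sub) again would raise SubscriptionError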
8,496 | def tag_supplementary_material_sibling_ordinal(tag):
if hasattr(tag, 'name') and tag.name != 'supplementary-material':  # literals inferred from the tag access and the function's purpose
return None
nodenames = [,,]
first_parent_tag = first_parent(tag, nodenames)
sibling_ordinal = 1
if first_parent_tag:
for supp_tag in first_parent_tag.find_all(tag.name):
if tag == supp_tag:
break
if supp_asset(supp_tag) == supp_asset(tag):
sibling_ordinal += 1
else:
for prev_tag in tag.find_all_previous(tag.name):
if not first_parent(prev_tag, nodenames):
if supp_asset(prev_tag) == supp_asset(tag):
sibling_ordinal += 1
return sibling_ordinal | The strategy is to count the preceding supplementary-material tags
having the same asset value to get the sibling ordinal.
The result is the tag's position, within its parent tag, among
supplementary materials of the same asset type |
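Hedged usage sketch with BeautifulSoup (the XML fragment and parser choice are illustrative; `supp_asset` and `first_parent` come from the same module and are not shown here):
    from bs4 import BeautifulSoup
    xml = ('<sub-article><supplementary-material id="s1"/>'
           '<supplementary-material id="s2"/></sub-article>')
    soup = BeautifulSoup(xml, 'lxml-xml')
    tags = soup.find_all('supplementary-material')
    # 1-based position among supplementary materials of the same asset type
    print(tag_supplementary_material_sibling_ordinal(tags[1]))   # 2, assuming both resolve to one asset type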
8,497 | def _fake_closeenumeration(self, namespace, **params):
self._validate_namespace(namespace)
context_id = params[]
try:
context_data = self.enumeration_contexts[context_id]
except KeyError:
raise CIMError(
CIM_ERR_INVALID_ENUMERATION_CONTEXT,
_format("EnumerationContext {0!A} not found in mock server "
"enumeration contexts.", context_id))
if context_data[] != namespace:
raise CIMError(
CIM_ERR_INVALID_NAMESPACE,
_format("Invalid namespace {0!A} for CloseEnumeration {1!A}",
namespace, context_id))
del self.enumeration_contexts[context_id] | Implements WBEM server responder for
:meth:`~pywbem.WBEMConnection.CloseEnumeration`
with data from the instance repository.
If the EnumerationContext is valid it removes it from the
context repository. Otherwise it raises a CIMError. |
8,498 | def main(nodes, edges):
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, nodelist=list(colors.keys()), node_color=list(colors.values()), cmap=gist_rainbow,
with_labels=False)  # networkx expects 'nodelist'; concrete lists avoid passing dict views
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results | Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid. |
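Illustrative invocation (a sketch; it assumes the module-level helpers `random_dag`, `submit_jobs`, `validate_tree` and `randomwait` plus a running IPython parallel cluster, none of which are shown here):
    from matplotlib import pyplot as plt
    G, results = main(nodes=32, edges=128)   # 32 tasks with 128 random dependencies
    plt.show()                               # display the start-time vs. runtime layout of the DAG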
8,499 | def get_user_config():
initialconf = normpath(os.path.join(get_share_dir(), "linkcheckerrc"))
userconf = normpath("~/.linkchecker/linkcheckerrc")
if os.path.isfile(initialconf) and not os.path.exists(userconf) and \
not Portable:
try:
make_userdir(userconf)
shutil.copy(initialconf, userconf)
except Exception as errmsg:
msg = _("could not copy initial configuration file %(src)r to %(dst)r: %(errmsg)r")
args = dict(src=initialconf, dst=userconf, errmsg=errmsg)
log.warn(LOG_CHECK, msg % args)
return userconf | Get the user configuration filename.
If the user configuration file does not exist, copy it from the initial
configuration file, but only if this is not a portable installation.
Returns path to user config file (which might not exist due to copy
failures or on portable systems).
@return configuration filename
@rtype string |
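Usage sketch (checking for the file is worthwhile because, as noted above, the returned path may not exist on portable installations or after a failed copy):
    import os
    path = get_user_config()
    if os.path.isfile(path):
        print('using user configuration at', path)
    else:
        print('no user configuration yet; defaults will apply')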