code | docstring
---|---|
def status():
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status)
|
Display the database migrations status
|
def trace_dispatch(self, frame, event, arg):
if hasattr(self, 'vimpdb'):
return self.vimpdb.trace_dispatch(frame, event, arg)
else:
return self._orig_trace_dispatch(frame, event, arg)
|
Allow switching to the Vimpdb instance.
|
def find(self, resource_id, query=None, **kwargs):
if query is None:
query = {}
return self.client._get(
self._url(resource_id),
query,
**kwargs
)
|
Gets a single resource.
|
def confirm(text, default=True):
if default:
legend = "[y]/n"
else:
legend = "y/[n]"
res = ""
while (res != "y") and (res != "n"):
res = raw_input(text + " ({}): ".format(legend)).lower()
if not res and default:
res = "y"
elif not res and not default:
res = "n"
if res[0] == "y":
return True
else:
return False
|
Console confirmation dialog based on raw_input.
|
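A minimal usage sketch for the `confirm` helper above; the prompt text is hypothetical, and `raw_input` means this is Python 2 code (use `input` on Python 3):
if confirm("Apply all pending migrations?", default=False):
    print("applying...")   # user typed "y"
else:
    print("aborted")       # user typed "n" or accepted the [n] default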
def acquire(self):
self._condition.acquire()
try:
if self._maxsize is not None and self._block:
while not self._pool and self._nconnections == self._maxsize:
self._condition.wait(timeout=None)
while self._pool:
pooledconn = self._pool.pop(0)
if self._idlettl is not None and (pooledconn.released + self._idlettl) < time.time():
pooledconn.connection.close()
self._nconnections -= 1
else:
return pooledconn.connection
connection = self._dbapi2.connect(*(), **self._connection_args.copy())
self._nconnections += 1
return connection
finally:
self._condition.release()
|
Get a new connection from the pool.
This will return an existing connection, if one is available in the
pool, or create a new connection.
.. warning:: If the pool was created with `maxsize` and `block=True`,
this method may block until a connection is available in the pool.
|
def validate(data):
text = data.get('text')
if not isinstance(text, _string_types) or len(text) == 0:
raise ValueError('text field is required and should not be empty')
if 'markdown' in data and not type(data['markdown']) is bool:
raise ValueError('markdown field should be bool')
if 'attachments' in data:
if not isinstance(data['attachments'], (list, tuple)):
raise ValueError('attachments field should be list or tuple')
for attachment in data['attachments']:
if 'text' not in attachment and 'title' not in attachment:
raise ValueError('text or title is required in attachment')
return True
|
Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid
|
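A short usage sketch for `validate` above, with hypothetical payloads; it assumes `_string_types` covers plain `str`:
validate({'text': 'deploy finished', 'markdown': True})           # True
validate({'text': 'report', 'attachments': [{'title': 'CPU'}]})   # True
try:
    validate({'text': ''})
except ValueError as err:
    print(err)   # text field is required and should not be empty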
def bind(self, environ):
self.environ = environ
self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
self.method = environ.get('REQUEST_METHOD', 'GET').upper()
|
Bind a new WSGI environment.
This is done automatically for the global `bottle.request`
instance on every request.
|
def format(self, record):
message = super(ContainerEngineHandler, self).format(record)
return format_stackdriver_json(record, message)
|
Format the message into JSON expected by fluentd.
:type record: :class:`~logging.LogRecord`
:param record: the log record
:rtype: str
:returns: A JSON string formatted for GKE fluentd.
|
def works(self, member_id):
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context)
|
This method retrieves an iterable of Works for the given member.
args: Member ID (Integer)
return: Works()
|
def linkUserToMostRecentCustomer(sender,**kwargs):
email_address = kwargs.get('email_address',None)
if not email_address or not email_address.primary or not email_address.verified:
return
user = email_address.user
if not hasattr(user, 'customer'):
last_reg = Registration.objects.filter(
customer__email=email_address.email,
customer__user__isnull=True,
dateTime__isnull=False
).order_by('-dateTime').first()
if last_reg:
customer = last_reg.customer
customer.user = user
customer.save()
if not user.first_name and not user.last_name:
user.first_name = customer.first_name
user.last_name = customer.last_name
user.save()
|
If a new primary email address has just been confirmed, check if the user
associated with that email has an associated customer object yet. If not,
then look for the customer with that email address who most recently
registered for something and that is not associated with another user.
Automatically associate the User with the Customer, and if missing, fill in
the user's name information with the Customer's name. This way, when a new
or existing customer creates a user account, they are seamlessly linked to
their most recent existing registration at the time they verify their email
address.
|
def run(self, target, args=()):
run_event = threading.Event()
run_event.set()
thread = threading.Thread(target=target, args=args + (run_event, ))
self.thread_pool.append(thread)
self.run_events.append(run_event)
thread.start()
|
Run a function in a separate thread.
:param target: the function to run.
:param args: the parameters to pass to the function.
|
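A sketch of a target function for the `run` method above; `runner` is a hypothetical instance of the surrounding class. Since the method appends `run_event` as the last positional argument, the target should accept it and poll it so the thread can be stopped by clearing the event:
import time

def worker(name, run_event):
    # run_event is appended by run(); loop until it is cleared.
    while run_event.is_set():
        print("working on", name)
        time.sleep(1.0)

runner.run(worker, args=("job-1",))   # hypothetical runner instance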
def load_config(json_path):
with open(json_path, 'r') as json_file:
config = json.loads(json_file.read())
assert(config['tree'][0]['page'] == 'index')
return config
|
Load config info from a .json file and return it.
|
def filelist_prune(self, at_data, *args, **kwargs):
b_status = True
l_file = []
str_path = at_data[0]
al_file = at_data[1]
if len(self.str_extension):
al_file = [x for x in al_file if self.str_extension in x]
if len(al_file):
al_file.sort()
l_file = al_file
b_status = True
else:
self.dp.qprint( "No valid files to analyze found in path %s!" % str_path,
comms = 'error', level = 3)
l_file = None
b_status = False
return {
'status': b_status,
'l_file': l_file
}
|
Given a list of files, possibly prune the list by
extension.
|
def get_children(self):
next_zoom = self.zoom + 1
return [
self.tile_pyramid.tile(
next_zoom,
self.row * 2 + row_offset,
self.col * 2 + col_offset
)
for row_offset, col_offset in [
(0, 0),
(0, 1),
(1, 1),
(1, 0),
]
if all([
self.row * 2 + row_offset < self.tp.matrix_height(next_zoom),
self.col * 2 + col_offset < self.tp.matrix_width(next_zoom)
])
]
|
Return tiles from next zoom level.
|
def parameters(self, namespaced=False):
if namespaced:
return json.loads(json.dumps(self.args[0]['parameters']), object_hook=lambda d: SimpleNamespace(**d))
else:
return self.args[0].get('parameters')
|
Returns the varlink error parameters of the exception.
|
def radii(self):
el = [site.species_string for site in self._structure.sites]
radii_dict = dict(zip(el, self._ionic_radii))
return radii_dict
|
List of ionic radii of elements in the order of sites.
|
def draw_ticks(self):
self._canvas_ticks.create_line((0, 10, self.pixel_width, 10), fill="black")
self._ticks = list(TimeLine.range(self._start, self._finish, self._tick_resolution / self._zoom_factor))
for tick in self._ticks:
string = TimeLine.get_time_string(tick, self._unit)
x = self.get_time_position(tick)
x_tick = x + 1 if x == 0 else (x - 1 if x == self.pixel_width else x)
x_text = x + 15 if x - 15 <= 0 else (x - 15 if x + 15 >= self.pixel_width else x)
self._canvas_ticks.create_text((x_text, 20), text=string, fill="black", font=("default", 10))
self._canvas_ticks.create_line((x_tick, 5, x_tick, 15), fill="black")
self._canvas_ticks.config(scrollregion="0 0 {0} {1}".format(self.pixel_width, 30))
|
Draw the time tick markers on the TimeLine Canvas
|
def _handle_actiondefinefunction(self, _):
obj = _make_object("ActionDefineFunction")
obj.FunctionName = self._get_struct_string()
obj.NumParams = unpack_ui16(self._src)
for i in range(1, obj.NumParams + 1):
setattr(obj, "param" + str(i), self._get_struct_string())
obj.CodeSize = unpack_ui16(self._src)
yield obj
|
Handle the ActionDefineFunction action.
|
def initialize_path(self, path_num=None):
for c in self.consumers:
c.initialize_path(path_num)
self.state = [c.state for c in self.consumers]
|
make the consumer_state ready for the next MC path
:param int path_num:
|
def iter_memory_snapshot(self, minAddr = None, maxAddr = None):
memory = self.get_memory_map(minAddr, maxAddr)
if not memory:
return
try:
filenames = self.get_mapped_filenames(memory)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror != win32.ERROR_ACCESS_DENIED:
raise
filenames = dict()
if minAddr is not None:
minAddr = MemoryAddresses.align_address_to_page_start(minAddr)
mbi = memory[0]
if mbi.BaseAddress < minAddr:
mbi.RegionSize = mbi.BaseAddress + mbi.RegionSize - minAddr
mbi.BaseAddress = minAddr
if maxAddr is not None:
if maxAddr != MemoryAddresses.align_address_to_page_start(maxAddr):
maxAddr = MemoryAddresses.align_address_to_page_end(maxAddr)
mbi = memory[-1]
if mbi.BaseAddress + mbi.RegionSize > maxAddr:
mbi.RegionSize = maxAddr - mbi.BaseAddress
while memory:
mbi = memory.pop(0)
mbi.filename = filenames.get(mbi.BaseAddress, None)
if mbi.has_content():
mbi.content = self.read(mbi.BaseAddress, mbi.RegionSize)
else:
mbi.content = None
yield mbi
|
Returns an iterator that allows you to go through the memory contents
of a process.
It's basically the same as the L{take_memory_snapshot} method, but it
takes the snapshot of each memory region as it goes, as opposed to
taking the whole snapshot at once. This allows you to work with very
large snapshots without a significant performance penalty.
Example::
# Print the memory contents of a process.
process.suspend()
try:
snapshot = process.generate_memory_snapshot()
for mbi in snapshot:
print HexDump.hexblock(mbi.content, mbi.BaseAddress)
finally:
process.resume()
The downside of this is the process must remain suspended while
iterating the snapshot, otherwise strange things may happen.
The snapshot can only be iterated once. To be able to iterate indefinitely
call the L{generate_memory_snapshot} method instead.
You can also iterate the memory of a dead process, just as long as the
last open handle to it hasn't been closed.
@see: L{take_memory_snapshot}
@type minAddr: int
@param minAddr: (Optional) Starting address in address range to query.
@type maxAddr: int
@param maxAddr: (Optional) Ending address in address range to query.
@rtype: iterator of L{win32.MemoryBasicInformation}
@return: Iterator of memory region information objects.
Two extra properties are added to these objects:
- C{filename}: Mapped filename, or C{None}.
- C{content}: Memory contents, or C{None}.
|
def lemmatize(self):
_lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
_raw = " ".join(self) + "."
_lemmas = _lemmatizer.lemmatize(_raw)
return self.__class__([Word(l, t) for l, t in _lemmas])
|
Return the lemma of each word in this WordList.
Currently using NLTKPunktTokenizer() for all lemmatization
tasks. This might cause slightly different tokenization results
compared to the TextBlob.words property.
|
def build(self):
if self.colour:
embed = discord.Embed(
title=self.title,
type='rich',
description=self.description,
colour=self.colour)
else:
embed = discord.Embed(
title=self.title,
type='rich',
description=self.description)
if self.thumbnail:
embed.set_thumbnail(url=self.thumbnail)
if self.image:
embed.set_image(url=self.image)
embed.set_author(
name="Modis",
url="https://musicbyango.com/modis/",
icon_url="http://musicbyango.com/modis/dp/modis64t.png")
for pack in self.datapacks:
embed.add_field(
name=pack[0],
value=pack[1],
inline=pack[2]
)
return embed
|
Builds Discord embed GUI
Returns:
discord.Embed: Built GUI
|
def _requirement_element(self, parent_element, req_data):
req_data = self._transform_result(req_data)
if not req_data:
return
title = req_data.get("title")
if not title:
logger.warning("Skipping requirement, title is missing")
return
req_id = req_data.get("id")
if not self._check_lookup_prop(req_id):
logger.warning(
"Skipping requirement `%s`, data missing for selected lookup method", title
)
return
attrs, custom_fields = self._classify_data(req_data)
attrs, custom_fields = self._fill_defaults(attrs, custom_fields)
attrs = OrderedDict(sorted(attrs.items()))
custom_fields = OrderedDict(sorted(custom_fields.items()))
requirement = etree.SubElement(parent_element, "requirement", attrs)
title_el = etree.SubElement(requirement, "title")
title_el.text = title
description = req_data.get("description")
if description:
description_el = etree.SubElement(requirement, "description")
description_el.text = description
self._fill_custom_fields(requirement, custom_fields)
|
Adds requirement XML element.
|
def validate_hex(value):
try:
binascii.unhexlify(value)
except Exception:
raise vol.Invalid(
'{} is not of hex format'.format(value))
return value
|
Validate that value has hex format.
|
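A quick sketch of `validate_hex` above; `vol.Invalid` is the voluptuous error type raised in the original module:
assert validate_hex('00ff') == '00ff'
try:
    validate_hex('xyz')      # odd length / non-hex characters
except Exception as err:     # vol.Invalid in the original module
    print(err)               # xyz is not of hex format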
def _replace_constant_methods(self):
self.cumulative_distribution = self._constant_cumulative_distribution
self.percent_point = self._constant_percent_point
self.probability_density = self._constant_probability_density
self.sample = self._constant_sample
|
Replaces the conventional distribution methods with their constant counterparts.
|
def get_definition(self, name):
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
if name == 'devicemgmt':
xaddr = 'http://%s:%s/onvif/device_service' % (self.host, self.port)
return xaddr, wsdlpath
xaddr = self.xaddrs.get(ns)
if not xaddr:
raise ONVIFError("Device doesn't support service: %s" % name)
return xaddr, wsdlpath
|
Returns xaddr and wsdl of specified service
|
def fit(self, data):
di = np.array(range(1, data.shape[1]))
indexless_data = data[:, di]
n_dims = indexless_data.shape[1]
if isinstance(self.n_cubes, Iterable):
n_cubes = np.array(self.n_cubes)
assert (
len(n_cubes) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
n_cubes = np.repeat(self.n_cubes, n_dims)
if isinstance(self.perc_overlap, Iterable):
perc_overlap = np.array(self.perc_overlap)
assert (
len(perc_overlap) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
perc_overlap = np.repeat(self.perc_overlap, n_dims)
assert all(0.0 <= p <= 1.0 for p in perc_overlap), (
"Each overlap percentage must be between 0.0 and 1.0., not %s"
% perc_overlap
)
bounds = self._compute_bounds(indexless_data)
ranges = bounds[1] - bounds[0]
inner_range = ((n_cubes - 1) / n_cubes) * ranges
inset = (ranges - inner_range) / 2
radius = ranges / (2 * (n_cubes) * (1 - perc_overlap))
zip_items = list(bounds)
zip_items.extend([n_cubes, inset])
centers_per_dimension = [
np.linspace(b + r, c - r, num=n) for b, c, n, r in zip(*zip_items)
]
centers = [np.array(c) for c in product(*centers_per_dimension)]
self.centers_ = centers
self.radius_ = radius
self.inset_ = inset
self.inner_range_ = inner_range
self.bounds_ = bounds
self.di_ = di
if self.verbose > 0:
print(
" - Cover - centers: %s\ninner_range: %s\nradius: %s"
% (self.centers_, self.inner_range_, self.radius_)
)
return centers
|
Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cubes`.
Parameters
============
data: array-like
Data to apply the cover to. Warning: First column must be an index column.
Returns
========
centers: list of arrays
A list of centers for each cube
|
def q(segmentation, s1, s2):
index1 = find_index(segmentation, s1)
index2 = find_index(segmentation, s2)
return index1 == index2
|
Test if ``s1`` and ``s2`` are in the same symbol, given the
``segmentation``.
|
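A minimal sketch of `q` above, assuming `find_index` returns the index of the symbol (sub-list) that contains the given stroke id:
segmentation = [[0, 1], [2], [3, 4]]   # three symbols made of stroke ids
q(segmentation, 0, 1)   # True  - strokes 0 and 1 belong to the same symbol
q(segmentation, 1, 2)   # False - strokes 1 and 2 belong to different symbols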
def output_dir(self, *args) -> str:
return os.path.join(self.project_dir, 'output', *args)
|
Directory where to store output
|
def indices_within_times(times, start, end):
start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())
tsort = times.argsort()
times_sorted = times[tsort]
left = numpy.searchsorted(times_sorted, start)
right = numpy.searchsorted(times_sorted, end)
if len(left) == 0:
return numpy.array([], dtype=numpy.uint32)
return tsort[numpy.hstack(numpy.r_[s:e] for s, e in zip(left, right))]
|
Return an index array into times that lie within the durations defined by the start and end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
|
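A small sketch of `indices_within_times` above, assuming the segment helpers it relies on are importable from the same module:
import numpy

times = numpy.array([0.5, 1.5, 2.5, 3.5, 4.5])
start = numpy.array([1.0, 4.0])   # two windows: [1.0, 3.0) and [4.0, 5.0)
end   = numpy.array([3.0, 5.0])
idx = indices_within_times(times, start, end)
# expected: indices of 1.5, 2.5 and 4.5 -> array([1, 2, 4])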
async def set_builtin_type_codec(self, typename, *,
schema='public', codec_name,
format=None):
self._check_open()
typeinfo = await self.fetchrow(
introspection.TYPE_BY_NAME, typename, schema)
if not typeinfo:
raise exceptions.InterfaceError(
'unknown type: {}.{}'.format(schema, typename))
if not introspection.is_scalar_type(typeinfo):
raise exceptions.InterfaceError(
'cannot alias non-scalar type {}.{}'.format(
schema, typename))
oid = typeinfo['oid']
self._protocol.get_settings().set_builtin_type_codec(
oid, typename, schema, 'scalar', codec_name, format)
self._drop_local_statement_cache()
|
Set a builtin codec for the specified scalar data type.
This method has two uses. The first is to register a builtin
codec for an extension type without a stable OID, such as 'hstore'.
The second use is to declare that an extension type or a
user-defined type is wire-compatible with a certain builtin
data type and should be exchanged as such.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``).
:param codec_name:
The name of the builtin codec to use for the type.
This should be either the name of a known core type
(such as ``"int"``), or the name of a supported extension
type. Currently, the only supported extension type is
``"pg_contrib.hstore"``.
:param format:
If *format* is ``None`` (the default), all formats supported
by the target codec are declared to be supported for *typename*.
If *format* is ``'text'`` or ``'binary'``, then only the
specified format is declared to be supported for *typename*.
.. versionchanged:: 0.18.0
The *codec_name* argument can be the name of any known
core data type. Added the *format* keyword argument.
|
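A hedged usage sketch for `set_builtin_type_codec`, following the asyncpg documentation's hstore example; the connection parameters are hypothetical and the `hstore` extension must already be installed in the database:
import asyncio
import asyncpg

async def main():
    con = await asyncpg.connect(user='postgres', database='test')  # hypothetical DSN
    # Register the builtin hstore codec for an extension type without a stable OID.
    await con.set_builtin_type_codec('hstore', codec_name='pg_contrib.hstore')
    result = await con.fetchval("SELECT 'a=>1,b=>2'::hstore")
    print(result)   # {'a': '1', 'b': '2'}
    await con.close()

asyncio.run(main())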
def _magic(header, footer, mime, ext=None):
if not header:
raise ValueError("Input was empty")
info = _identify_all(header, footer, ext)[0]
if mime:
return info.mime_type
return info.extension if not \
isinstance(info.extension, list) else info[0].extension
|
Discover what type of file it is based on the incoming string
|
def safe_values(self, value):
string_val = ""
if isinstance(value, datetime.date):
try:
string_val = value.strftime('{0}{1}{2}'.format(
current_app.config['DATETIME']['DATE_FORMAT'],
current_app.config['DATETIME']['SEPARATOR'],
current_app.config['DATETIME']['TIME_FORMAT']))
except RuntimeError as error:
string_val = value.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(value, bytes):
string_val = value.decode('utf-8')
elif isinstance(value, decimal.Decimal):
string_val = float(value)
else:
string_val = value
return string_val
|
Parse non-string values that will not serialize
|
def _update_route(dcidr, router_ip, old_router_ip,
vpc_info, con, route_table_id, update_reason):
instance = eni = None
try:
instance, eni = find_instance_and_eni_by_ip(vpc_info, router_ip)
logging.info("--- updating existing route in RT '%s' "
"%s -> %s (%s, %s) (old IP: %s, reason: %s)" %
(route_table_id, dcidr, router_ip,
instance.id, eni.id, old_router_ip, update_reason))
try:
con.replace_route(
route_table_id = route_table_id,
destination_cidr_block = dcidr,
instance_id = instance.id,
interface_id = eni.id)
except Exception as e:
raise Exception("replace_route failed: %s" % str(e))
CURRENT_STATE.routes[dcidr] = \
(router_ip, str(instance.id), str(eni.id))
except Exception as e:
msg = "*** failed to update route in RT '%s' %s -> %s (%s)" % \
(route_table_id, dcidr, old_router_ip, e.message)
update_reason += " [ERROR update route: %s]" % e.message
logging.error(msg)
_rt_state_update(route_table_id, dcidr, router_ip,
instance.id if instance else "(none)",
eni.id if eni else "(none)",
old_router_ip, update_reason)
|
Update an existing route entry in the route table.
|
def _init_session(self):
self._real_session = requests.Session()
if self._port == 443:
logger.debug('initializing HTTPS session')
self._real_base_url = 'https://{host}:{port}'.format(host=self._host, port=self._port)
self._real_adapter = self._real_session.adapters['https://']
if self._ca_bundle is not None:
logger.debug('use CA bundle: %s', self._ca_bundle)
ca_bundle = os.path.expanduser(os.path.expandvars(self._ca_bundle))
if not os.path.exists(ca_bundle):
raise OSError(
'CA bundle file does not exist: {0}'.format(ca_bundle)
)
self._real_session.verify = ca_bundle
else:
logger.debug('initializing HTTP session')
self._real_base_url = 'http://{host}:{port}'.format(host=self._host, port=self._port)
self._real_adapter = self._real_session.adapters['http://']
self._real_session.get(self._real_base_url)
self._real_session.headers.update({'Host': self._host})
self._login(self._username, self._password)
|
Delayed initialization of Requests Session object.
This is done in order *not* to share the Session object across
a multiprocessing pool.
|
def _newRemoteException(ErrorType):
RemoteErrorBaseType = _RemoteExceptionMeta('', (ErrorType,), {})
class RemoteException(RemoteErrorBaseType):
BaseExceptionType = ErrorType
def __init__(self, thrownError, tracebackString):
self.thrownError = thrownError
self.tracebackString = tracebackString
RemoteErrorBaseType.__init__(self, *thrownError.args)
loadError = staticmethod(_loadError)
def __str__(self):
return '\n%s\n%s' % (self.tracebackString, self.thrownError)
def __reduce__(self):
args = (ErrorType, self.thrownError, self.tracebackString)
return self.loadError, args
RemoteException.__name__ = 'Remote' + ErrorType.__name__
return RemoteException
|
Create a new RemoteException type from a given error type.
|
def dup2(a, b, timeout=3):
dup_err = None
for i in range(int(10 * timeout)):
try:
return os.dup2(a, b)
except OSError as e:
dup_err = e
if e.errno == errno.EBUSY:
time.sleep(0.1)
else:
raise
if dup_err:
raise dup_err
|
Like os.dup2, but retry on EBUSY
|
def add_listener(self, callback, event_type=None):
listener_id = uuid4()
self.listeners.append(
{
'uid': listener_id,
'callback': callback,
'event_type': event_type
}
)
return listener_id
|
Add a callback handler for events going to this room.
Args:
callback (func(room, event)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
|
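A short sketch of `add_listener` above; `room` is a hypothetical room object from the same client library:
def on_message(room, event):
    if event['type'] == 'm.room.message':
        print(event['content'].get('body'))

listener_id = room.add_listener(on_message, event_type='m.room.message')
# listener_id can later be used to identify or remove this callback.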
def list_flavors(self, limit=None, marker=None):
return self._flavor_manager.list(limit=limit, marker=marker)
|
Returns a list of all available Flavors.
|
def available_options(self):
for option in list(self.cmd.options.values()):
if (option.is_multiple or
option not in list(self.used_options)):
yield option
|
Return options that can be used given
the current cmd line
:rtype: command.Option generator
|
def _build_document_converter(cls, session: AppSession):
if not session.args.convert_links:
return
converter = session.factory.new(
'BatchDocumentConverter',
session.factory['HTMLParser'],
session.factory['ElementWalker'],
session.factory['URLTable'],
backup=session.args.backup_converted
)
return converter
|
Build the Document Converter.
|
def connect(self):
config = self.config
self.rdb = redis.Redis(config['host'], config['port'], config['db'],\
config['password'])
try:
info = self.rdb.info()
self.connected = True
except redis.ConnectionError:
return False
return True
|
Creates the connection with the redis server.
Returns ``True`` if the connection works, otherwise returns
``False``. It does not take any arguments.
:return: ``Boolean`` value
.. note::
After creating the ``Queue`` object the user should call
the ``connect`` method to create the connection.
.. doctest::
>>> from retask import Queue
>>> q = Queue('test')
>>> q.connect()
True
|
def reentrancies(self):
entrancies = defaultdict(int)
entrancies[self.top] += 1
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
|
def _format_key_name(self) -> str:
key_name = 'ld:{0}:{1}:features'.format(
self.project_key,
self.environment_key
)
return key_name
|
Return formatted redis key name.
|
def _to_dot_key(cls, section, key=None):
if key:
return (NON_ALPHA_NUM.sub('_', section.lower()), NON_ALPHA_NUM.sub('_', key.lower()))
else:
return NON_ALPHA_NUM.sub('_', section.lower())
|
Return the section and key in dot notation format.
|
def set_computer_sleep(minutes):
value = _validate_sleep(minutes)
cmd = 'systemsetup -setcomputersleep {0}'.format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_computer_sleep,
)
|
Set the amount of idle time until the computer sleeps. Pass "Never" or "Off"
to never sleep.
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
:ptype: int, str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_computer_sleep 120
salt '*' power.set_computer_sleep off
|
def cycle_windows(tree, direction):
wanted = {
"orientation": ("vertical" if direction in ("up", "down")
else "horizontal"),
"direction": (1 if direction in ("down", "right")
else -1),
}
split = find_parent_split(tree.focused.parent, wanted["orientation"])
if split:
child_ids = [child.id for child in split.children]
focus_idx = child_ids.index(split.focused_child.id)
next_idx = (focus_idx + wanted['direction']) % len(child_ids)
next_node = split.children[next_idx]
return find_focusable(next_node)
return None
|
Cycle through windows of the current workspace
|
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[],
job_options=[]):
files = get_files(file_tokens)
paths = get_paths(path_tokens)
job_options = get_options(job_options)
templates = _get_command_templates(command_tokens, files, paths,
job_options)
for command_template in templates:
command_template._dependencies = _get_prelim_dependencies(
command_template, templates)
return templates
|
Given a list of tokens from the grammar, return a
list of commands.
|
def GetMetricTypes(self, request, context):
LOG.debug("GetMetricTypes called")
try:
metrics = self.plugin.update_catalog(ConfigMap(pb=request.config))
return MetricsReply(metrics=[m.pb for m in metrics])
except Exception as err:
msg = "message: {}\n\nstack trace: {}".format(
err, traceback.format_exc())
return MetricsReply(metrics=[], error=msg)
|
Dispatches the request to the plugin's update_catalog method
|
def write_tiff_image(filename, image, compress=False):
if compress:
return tiff.imsave(filename, image, compress='lzma')
return tiff.imsave(filename, image)
|
Write image data to TIFF file
:param filename: name of file to write data to
:type filename: str
:param image: image data to write to file
:type image: numpy array
:param compress: whether to compress data. If ``True``, lzma compression is used. Default is ``False``
:type compress: bool
|
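A minimal usage sketch for `write_tiff_image` above, assuming the `tiff` module imported by the original code is tifffile:
import numpy as np

image = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
write_tiff_image('example.tiff', image)                       # uncompressed
write_tiff_image('example_lzma.tiff', image, compress=True)   # lzma-compressed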
def pushpopitem(self, key, value, node_factory=_Node):
heap = self._heap
position = self._position
precedes = self._precedes
prio = self._keyfn(value) if self._keyfn else value
node = node_factory(key, value, prio)
if key in self:
raise KeyError('%s is already in the queue' % repr(key))
if heap and precedes(heap[0].prio, node.prio):
node, heap[0] = heap[0], node
position[key] = 0
del position[node.key]
self._sink(0)
return node.key, node.value
|
Equivalent to inserting a new item followed by removing the top
priority item, but faster. Raises ``KeyError`` if the new key is
already in the pqdict.
|
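A usage sketch of `pushpopitem`, assuming the surrounding class is pqdict's priority-queue dictionary (a min-priority queue by default):
from pqdict import pqdict   # assumption: the method above belongs to pqdict

pq = pqdict({'a': 3, 'b': 1, 'c': 2})
pq.pushpopitem('d', 0)    # ('d', 0)  - new item outranks the current top, comes straight back
pq.pushpopitem('e', 10)   # ('b', 1)  - 'e' is inserted and the old top 'b' is returned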
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set):
group_psf = None
for star in star_group:
psf_to_add = template_psf_model.copy()
for param_tab_name, param_name in pars_to_set.items():
setattr(psf_to_add, param_name, star[param_tab_name])
if group_psf is None:
group_psf = psf_to_add
else:
group_psf += psf_to_add
return group_psf
|
Construct a joint PSF model which consists of a sum of PSF's templated on
a specific model, but whose parameters are given by a table of objects.
Parameters
----------
template_psf_model : `astropy.modeling.Fittable2DModel` instance
The model to use for *individual* objects. Must have parameters named
``x_0``, ``y_0``, and ``flux``.
star_group : `~astropy.table.Table`
Table of stars for which the compound PSF will be constructed. It
must have columns named ``x_0``, ``y_0``, and ``flux_0``.
Returns
-------
group_psf
An `astropy.modeling` ``CompoundModel`` instance which is a sum of the
given PSF models.
|
def qwarp_apply(dset_from,dset_warp,affine=None,warp_suffix='_warp',master='WARP',interp=None,prefix=None):
out_dset = prefix
if out_dset==None:
out_dset = os.path.split(nl.suffix(dset_from,warp_suffix))[1]
dset_from_info = nl.dset_info(dset_from)
dset_warp_info = nl.dset_info(dset_warp)
if(dset_from_info.orient!=dset_warp_info.orient):
nl.run(['3dresample','-orient',dset_warp_info.orient,'-prefix',nl.suffix(dset_from,'_reorient'),'-inset',dset_from],products=nl.suffix(dset_from,'_reorient'))
dset_from = nl.suffix(dset_from,'_reorient')
warp_opt = str(dset_warp)
if affine:
warp_opt += ' ' + affine
cmd = [
'3dNwarpApply',
'-nwarp', warp_opt]
cmd += [
'-source', dset_from,
'-master',master,
'-prefix', out_dset
]
if interp:
cmd += ['-interp',interp]
nl.run(cmd,products=out_dset)
|
applies the transform from a previous qwarp
Uses the warp parameters from the dataset listed in
``dset_warp`` (usually the dataset name ends in ``_WARP``)
to the dataset ``dset_from``. If a ``.1D`` file is given
in the ``affine`` parameter, it will be applied simultaneously
with the qwarp.
If the parameter ``interp`` is given, it will be used as the interpolation method,
otherwise the default (currently wsinc5) is used.
The output dataset will have the ``warp_suffix`` suffix added to its name.
|
def elapsed(self, total=True):
return self.timer.elapsed(self.label, total=total)
|
Return the elapsed time for the timer.
Parameters
----------
total : bool, optional (default True)
If ``True`` return the total elapsed time since the first
call of :meth:`start` for the selected timer, otherwise
return the elapsed time since the most recent call of
:meth:`start` for which there has not been a corresponding
call to :meth:`stop`.
Returns
-------
dlt : float
Elapsed time
|
def fcor(self):
if self.XCBV is None:
return None
else:
return self.flux - self._mission.FitCBVs(self)
|
The CBV-corrected de-trended flux.
|
def parse(file_path=None, show=False):
if file_path is None:
file_path = os.path.join(os.getcwd(), '.sciunit')
if not os.path.exists(file_path):
raise IOError('No .sciunit file was found at %s' % file_path)
config = configparser.RawConfigParser(allow_no_value=True)
config.read(file_path)
for section in config.sections():
if show:
print(section)
for options in config.options(section):
if show:
print("\t%s: %s" % (options, config.get(section, options)))
return config
|
Parse a .sciunit config file.
|
def scandir(path='.'):
scandir_path = fsdecode(path).replace('\\', '/')
if not is_storage(scandir_path):
return os_scandir(scandir_path)
return _scandir_generator(
is_bytes=isinstance(fspath(path), (bytes, bytearray)),
scandir_path=scandir_path, system=get_instance(scandir_path))
|
Return an iterator of os.DirEntry objects corresponding to the entries in
the directory given by path. The entries are yielded in arbitrary order,
and the special entries '.' and '..' are not included.
Equivalent to "os.scandir".
Args:
path (path-like object): Path or URL.
If path is of type bytes (directly or indirectly through the
PathLike interface), the type of the name and path attributes
of each os.DirEntry will be bytes; in all other circumstances,
they will be of type str.
Returns:
Generator of os.DirEntry: Entries information.
|
def discretized_mix_logistic_loss(pred, labels):
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
batch, height, width, num_mixtures = shape_list(logits)
labels = tf.tile(
tf.reshape(labels, [batch, height, width, 1, 3]),
[1, 1, 1, num_mixtures, 1])
means_0 = locs[..., 0]
means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
means_2 = (
locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
coeffs[..., 2] * labels[..., 1])
means = tf.stack([means_0, means_1, means_2], axis=-1)
centered_labels = labels - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
cdf_min = tf.nn.sigmoid(min_in)
log_prob_0 = plus_in - tf.nn.softplus(plus_in)
log_prob_255 = -tf.nn.softplus(min_in)
prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
log_prob_event = tf.log(prob_event)
mid_in = inv_stdv * centered_labels
log_prob_event_approx = (
mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
log_probs = tf.where(
labels < -0.999, log_prob_0,
tf.where(
labels > 0.999, log_prob_255,
tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))
log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
output = -tf.reduce_logsumexp(log_probs, axis=-1)
return output
|
Computes negative log probability for the discretized mixture of logistics.
The distribution of a whole pixel is a mixture of 3-dimensional discretized
logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
discretized logistic distributions, one for each channel. It defines
```none
P(X = x)
= sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
= sum_{k=1}^K probs[k] * [
prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
```
The means tensor is a linear combination of location parameters and previous
channels. The discretized logistic distribution assigns probability mass to an
event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X < x - 0.5) for 1 < x <
254; P(X <= 0.5) for x = 0; and 1 - P(X < 245.5) for x = 255. Instead of
8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of true pixel intensities
rescaled to [-1, 1]. The computation assumes channels is 3.
Returns:
A [batch, height, width] tensor of the negative log conditional probability
of each pixel given all previous pixels.
|
def draw(self,N=1.5):
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
|
Compute every node's coordinates after converging to the optimal ordering in N
rounds, and finally perform the edge routing.
|
def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
if "credentials" in kwargs:
raise TypeError("credentials must not be in keyword arguments")
with io.open(json_credentials_path, "r", encoding="utf-8") as json_fi:
credentials_info = json.load(json_fi)
credentials = service_account.Credentials.from_service_account_info(
credentials_info
)
if cls._SET_PROJECT:
if "project" not in kwargs:
kwargs["project"] = credentials_info.get("project_id")
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
|
Factory to retrieve JSON credentials while creating client.
:type json_credentials_path: str
:param json_credentials_path: The path to a private key file (this file
was given to you when you created the
service account). This file must contain
a JSON object with a private key and
other credentials information (downloaded
from the Google APIs console).
:type args: tuple
:param args: Remaining positional arguments to pass to constructor.
:type kwargs: dict
:param kwargs: Remaining keyword arguments to pass to constructor.
:rtype: :class:`_ClientFactoryMixin`
:returns: The client created with the retrieved JSON credentials.
:raises TypeError: if there is a conflict with the kwargs
and the credentials created by the factory.
|
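A hedged usage sketch for the factory above; `google.cloud.storage.Client` is one client built on this mixin, and the key file path is hypothetical:
from google.cloud import storage

# The project is read from the key file's project_id when not passed explicitly.
client = storage.Client.from_service_account_json('service-account-key.json')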
def _succeed(self, result):
for fn, args, kwargs in self._callbacks:
fn(result, *args, **kwargs)
self._resulted_in = result
|
Fire the success chain.
|
def sample_zip(items_list, num_samples, allow_overflow=False, per_bin=1):
samples_list = [[] for _ in range(num_samples)]
samples_iter = zip_longest(*items_list)
sx = 0
for ix, samples_ in zip(range(num_samples), samples_iter):
samples = filter_Nones(samples_)
samples_list[sx].extend(samples)
if (ix + 1) % per_bin == 0:
sx += 1
if allow_overflow:
overflow_samples = flatten([filter_Nones(samples_) for samples_ in samples_iter])
return samples_list, overflow_samples
else:
try:
next(samples_iter)
except StopIteration:
pass
else:
raise AssertionError('Overflow occured')
return samples_list
|
Helper for sampling.
Given a list of lists, samples one item for each list and bins them into
num_samples bins. If all sublists are of equal size this is equivalent to a
zip, but otherwise consecutive bins will have monotonically fewer
elements.
# Doctest doesn't work with assertionerror
#util_list.sample_zip(items_list, 2)
#...
#AssertionError: Overflow occured
Args:
items_list (list):
num_samples (?):
allow_overflow (bool):
per_bin (int):
Returns:
tuple : (samples_list, overflow_samples)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool import util_list
>>> items_list = [[1, 2, 3, 4, 0], [5, 6, 7], [], [8, 9], [10]]
>>> util_list.sample_zip(items_list, 5)
...
[[1, 5, 8, 10], [2, 6, 9], [3, 7], [4], [0]]
>>> util_list.sample_zip(items_list, 2, allow_overflow=True)
...
([[1, 5, 8, 10], [2, 6, 9]], [3, 7, 4])
>>> util_list.sample_zip(items_list, 4, allow_overflow=True, per_bin=2)
...
([[1, 5, 8, 10, 2, 6, 9], [3, 7, 4], [], []], [0])
|
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
pkg_info = read_pkg_info(pkginfo_path)
pkg_info.replace_header('Metadata-Version', '2.1')
requires_path = os.path.join(egg_info_path, 'requires.txt')
if os.path.exists(requires_path):
with open(requires_path) as requires_file:
requires = requires_file.read()
for extra, reqs in sorted(pkg_resources.split_sections(requires),
key=lambda x: x[0] or ''):
for item in generate_requirements({extra: reqs}):
pkg_info[item[0]] = item[1]
description = pkg_info['Description']
if description:
pkg_info.set_payload(dedent_description(pkg_info))
del pkg_info['Description']
return pkg_info
|
Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
|
def populate_obj(self, obj, keys=None):
keys = keys or self.keys()
for key in keys:
key = key.upper()
value = self.get(key, empty)
if value is not empty:
setattr(obj, key, value)
|
Given the `obj` populate it using self.store items.
|
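A minimal sketch of `populate_obj` above; `settings` is a hypothetical mapping-like store exposing `keys()`/`get()` as assumed by the method:
class AppConfig:
    pass

cfg = AppConfig()
settings.populate_obj(cfg, keys=['debug', 'database_url'])
print(cfg.DEBUG, cfg.DATABASE_URL)   # keys are upper-cased before being set on the object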
def RecordHelloWorld(handler, t):
url = "%s/receive_recording.py" % THIS_URL
t.startRecording(url)
t.say ("Hello, World.")
t.stopRecording()
json = t.RenderJson()
logging.info ("RecordHelloWorld json: %s" % json)
handler.response.out.write(json)
|
Demonstration of recording a message.
|
def getConfigFile():
fileName = '.wakatime.cfg'
home = os.environ.get('WAKATIME_HOME')
if home:
return os.path.join(os.path.expanduser(home), fileName)
return os.path.join(os.path.expanduser('~'), fileName)
|
Returns the config file location.
If the $WAKATIME_HOME env variable is defined, returns
$WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
|
def imshow(x, y, z, ax, **kwargs):
if x.ndim != 1 or y.ndim != 1:
raise ValueError('imshow requires 1D coordinates, try using '
'pcolormesh or contour(f)')
try:
xstep = (x[1] - x[0]) / 2.0
except IndexError:
xstep = .1
try:
ystep = (y[1] - y[0]) / 2.0
except IndexError:
ystep = .1
left, right = x[0] - xstep, x[-1] + xstep
bottom, top = y[-1] + ystep, y[0] - ystep
defaults = {'origin': 'upper',
'interpolation': 'nearest'}
if not hasattr(ax, 'projection'):
defaults['aspect'] = 'auto'
defaults.update(kwargs)
if defaults['origin'] == 'upper':
defaults['extent'] = [left, right, bottom, top]
else:
defaults['extent'] = [left, right, top, bottom]
if z.ndim == 3:
if z.shape[-1] == 3:
alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)
if np.issubdtype(z.dtype, np.integer):
alpha *= 255
z = np.ma.concatenate((z, alpha), axis=2)
else:
z = z.copy()
z[np.any(z.mask, axis=-1), -1] = 0
primitive = ax.imshow(z, **defaults)
return primitive
|
Image plot of 2d DataArray using matplotlib.pyplot
Wraps :func:`matplotlib:matplotlib.pyplot.imshow`
While other plot methods require the DataArray to be strictly
two-dimensional, ``imshow`` also accepts a 3D array where some
dimension can be interpreted as RGB or RGBA color channels and
allows this dimension to be specified via the kwarg ``rgb=``.
Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA
data, by applying a single scaling factor and offset to all bands.
Passing ``robust=True`` infers ``vmin`` and ``vmax``
:ref:`in the usual way <robust-plotting>`.
.. note::
This function needs uniformly spaced coordinates to
properly label the axes. Call DataArray.plot() to check.
The pixels are centered on the coordinate values. I.e., if the coordinate
value is 3.2 then the pixels for those coordinates will be centered on 3.2.
|
def uncancel_confirmation(self, confirmation_id):
return self._create_put_request(
resource=CONFIRMATIONS,
billomat_id=confirmation_id,
command=UNCANCEL,
)
|
Uncancels a confirmation
:param confirmation_id: the confirmation id
|
def Docker():
docker_info = {'server': {}, 'env': '', 'type': '', 'os': ''}
try:
d_client = docker.from_env()
docker_info['server'] = d_client.version()
except Exception as e:
logger.error("Can't get docker info " + str(e))
system = System()
docker_info['os'] = system
if 'DOCKER_MACHINE_NAME' in environ:
docker_info['env'] = environ['DOCKER_MACHINE_NAME']
docker_info['type'] = 'docker-machine'
elif 'DOCKER_HOST' in environ:
docker_info['env'] = environ['DOCKER_HOST']
docker_info['type'] = 'remote'
else:
docker_info['type'] = 'native'
return docker_info
|
Get Docker setup information
|
def connect(self, addr):
if _debug: RouterToRouterService._debug("connect %r", addr)
conn = ConnectionState(addr)
self.multiplexer.connections[addr] = conn
conn.service = self
conn.pendingNPDU = []
request = ServiceRequest(ROUTER_TO_ROUTER_SERVICE_ID)
request.pduDestination = addr
self.service_request(request)
return conn
|
Initiate a connection request to the peer router.
|
def clone_and_update(self, **kwargs):
cloned = self.clone()
cloned.update(**kwargs)
return cloned
|
Clones the object and updates the clone with the args
@param kwargs: Keyword arguments to set
@return: The cloned copy with updated values
|
def convert_mnist(directory, output_directory, output_filename=None,
dtype=None):
if not output_filename:
if dtype:
output_filename = 'mnist_{}.hdf5'.format(dtype)
else:
output_filename = 'mnist.hdf5'
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_feat_path = os.path.join(directory, TRAIN_IMAGES)
train_features = read_mnist_images(train_feat_path, dtype)
train_lab_path = os.path.join(directory, TRAIN_LABELS)
train_labels = read_mnist_labels(train_lab_path)
test_feat_path = os.path.join(directory, TEST_IMAGES)
test_features = read_mnist_images(test_feat_path, dtype)
test_lab_path = os.path.join(directory, TEST_LABELS)
test_labels = read_mnist_labels(test_lab_path)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
Converts the MNIST dataset to HDF5.
Converts the MNIST dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.MNIST`. The converted dataset is
saved as 'mnist.hdf5'.
This method assumes the existence of the following files:
* `train-images-idx3-ubyte.gz`
* `train-labels-idx1-ubyte.gz`
* `t10k-images-idx3-ubyte.gz`
* `t10k-labels-idx1-ubyte.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `None`, in which case a name
based on `dtype` will be used.
dtype : str, optional
Either 'float32', 'float64', or 'bool'. Defaults to `None`,
in which case images will be returned in their original
unsigned byte format.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d", **kwargs):
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('unmask', service_name)
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name, **kwargs)
if not started:
started = service_start(service_name, **kwargs)
return started
|
Resume a system service.
Reenable starting again at boot. Start the service.
:param service_name: the name of the service to resume
:param init_dir: the path to the init dir
:param initd_dir: the path to the initd dir
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
|
def download_luts(**kwargs):
import tarfile
import requests
TQDM_LOADED = True
try:
from tqdm import tqdm
except ImportError:
TQDM_LOADED = False
dry_run = kwargs.get('dry_run', False)
if 'aerosol_type' in kwargs:
if isinstance(kwargs['aerosol_type'], (list, tuple, set)):
aerosol_types = kwargs['aerosol_type']
else:
aerosol_types = [kwargs['aerosol_type'], ]
else:
aerosol_types = HTTPS_RAYLEIGH_LUTS.keys()
chunk_size = 10124
for subname in aerosol_types:
LOG.debug('Aerosol type: %s', subname)
http = HTTPS_RAYLEIGH_LUTS[subname]
LOG.debug('URL = %s', http)
subdir_path = RAYLEIGH_LUT_DIRS[subname]
try:
LOG.debug('Create directory: %s', subdir_path)
if not dry_run:
os.makedirs(subdir_path)
except OSError:
if not os.path.isdir(subdir_path):
raise
if dry_run:
continue
response = requests.get(http)
total_size = int(response.headers['content-length'])
filename = os.path.join(
subdir_path, "pyspectral_rayleigh_correction_luts.tgz")
if TQDM_LOADED:
with open(filename, "wb") as handle:
for data in tqdm(iterable=response.iter_content(chunk_size=chunk_size),
total=(total_size / chunk_size), unit='kB'):
handle.write(data)
else:
with open(filename, "wb") as handle:
for data in response.iter_content():
handle.write(data)
tar = tarfile.open(filename)
tar.extractall(subdir_path)
tar.close()
os.remove(filename)
|
Download the LUTs from the internet.
|
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
best_action_strings = output_dict["best_action_strings"]
world = NlvrLanguage(set())
logical_forms = []
for instance_action_sequences in best_action_strings:
instance_logical_forms = []
for action_strings in instance_action_sequences:
if action_strings:
instance_logical_forms.append(world.action_sequence_to_logical_form(action_strings))
else:
instance_logical_forms.append('')
logical_forms.append(instance_logical_forms)
action_mapping = output_dict['action_mapping']
best_actions = output_dict['best_action_strings']
debug_infos = output_dict['debug_info']
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info):
action_info = {}
action_info['predicted_action'] = predicted_action
considered_actions = action_debug_info['considered_actions']
probabilities = action_debug_info['probabilities']
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[(batch_index, action)], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info['considered_actions'] = considered_actions
action_info['action_probabilities'] = probabilities
action_info['question_attention'] = action_debug_info.get('question_attention', [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
output_dict["logical_form"] = logical_forms
return output_dict
|
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. We only transform the action string sequences into logical
forms here.
|
def list_attr(self, recursive=False):
if recursive:
raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
"Please use attr_dict instead.")
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttrShallow
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
|
Gets all attributes from the symbol.
Example
-------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.list_attr()
{'mood': 'angry'}
Returns
-------
ret : Dict of str to str
A dictionary mapping attribute keys to values.
|
def get_value_matched_by_regex(field_name, regex_matches, string):
try:
value = regex_matches.group(field_name)
if value is not None:
return value
except IndexError:
pass
raise MissingFieldError(string, field_name)
|
Ensure value stored in regex group exists.
|
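A small sketch of `get_value_matched_by_regex` above; `MissingFieldError` is assumed to come from the same module:
import re

pattern = re.compile(r'(?P<user>\w+)@(?P<host>[\w.]+)')
string = 'alice@example.com'
matches = pattern.match(string)
get_value_matched_by_regex('host', matches, string)   # 'example.com'
get_value_matched_by_regex('port', matches, string)   # raises MissingFieldError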
def _convert_volume(self, volume):
data = {
'host': volume.get('hostPath'),
'container': volume.get('containerPath'),
'readonly': volume.get('mode') == 'RO',
}
return data
|
This is for ingesting the "volumes" of an app description
|
def stalk_buffer(self, pid, address, size, action = None):
self.__set_buffer_watch(pid, address, size, action, True)
|
Sets a one-shot page breakpoint and notifies
when the given buffer is accessed.
@see: L{dont_watch_variable}
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@rtype: L{BufferWatch}
@return: Buffer watch identifier.
|
def drop(manager: Manager, network_id: Optional[int], yes):
if network_id:
manager.drop_network_by_id(network_id)
elif yes or click.confirm('Drop all networks?'):
manager.drop_networks()
|
Drop a network by its identifier or drop all networks.
|
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
unique_atoms = np.unique(p_atoms)
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
|
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
|
def into_view(self):
try:
return View._from_ptr(rustcall(
_lib.lsm_index_into_view,
self._get_ptr()))
finally:
self._ptr = None
|
Converts the index into a view
|
def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix,
"tocsr", sp.isspmatrix_csr)
|
Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
|
def install_given_reqs(
to_install,
install_options,
global_options=(),
*args, **kwargs
):
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
uninstalled_pathset = requirement.uninstall(
auto_confirm=True
)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except Exception:
should_rollback = (
requirement.conflicts_with and
not requirement.install_succeeded
)
if should_rollback:
uninstalled_pathset.rollback()
raise
else:
should_commit = (
requirement.conflicts_with and
requirement.install_succeeded
)
if should_commit:
uninstalled_pathset.commit()
requirement.remove_temporary_source()
return to_install
|
Install everything in the given list.
(to be called after having downloaded and unpacked the packages)
|
def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Identifier Descriptor not initialized')
if self.len_fi > 0:
if self.encoding == 'latin-1':
prefix = b'\x08'
elif self.encoding == 'utf-16_be':
prefix = b'\x10'
else:
raise pycdlibexception.PyCdlibInternalError('Invalid UDF encoding; this should not happen')
fi = prefix + self.fi
else:
fi = b''
rec = struct.pack(self.FMT, b'\x00' * 16, 1,
self.file_characteristics, self.len_fi,
self.icb.record(),
self.len_impl_use) + self.impl_use + fi + b'\x00' * UDFFileIdentifierDescriptor.pad(struct.calcsize(self.FMT) + self.len_impl_use + self.len_fi)
return self.desc_tag.record(rec[16:]) + rec[16:]
|
A method to generate the string representing this UDF File Identifier Descriptor.
Parameters:
None.
Returns:
A string representing this UDF File Identifier Descriptor.
|
def force_close(self) -> None:
self._force_close = True
if self._waiter:
self._waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
|
Force close connection
|
def _check_flag_meanings(self, ds, name):
variable = ds.variables[name]
flag_meanings = getattr(variable, 'flag_meanings', None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_meanings.assert_true(flag_meanings is not None,
"{}'s flag_meanings attribute is required for flag variables".format(name))
valid_meanings.assert_true(isinstance(flag_meanings, basestring),
"{}'s flag_meanings attribute must be a string".format(name))
if not isinstance(flag_meanings, basestring):
return valid_meanings.to_result()
valid_meanings.assert_true(len(flag_meanings) > 0,
"{}'s flag_meanings can't be empty".format(name))
flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(False,
"{}'s flag_meanings attribute defined an illegal flag meaning ".format(name)+\
"{}".format(meaning))
return valid_meanings.to_result()
|
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
|
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff):
mutant_binding_predictions = []
for binding_prediction in epitopes:
peptide = binding_prediction.peptide
peptide_offset = binding_prediction.offset
isovar_row = dict(binding_prediction.source_sequence_key)
is_mutant = contains_mutant_residues(
peptide_start_in_protein=peptide_offset,
peptide_length=len(peptide),
mutation_start_in_protein=isovar_row["variant_aa_interval_start"],
mutation_end_in_protein=isovar_row["variant_aa_interval_end"])
if is_mutant and binding_prediction.value <= ic50_cutoff:
mutant_binding_predictions.append(binding_prediction)
return EpitopeCollection(mutant_binding_predictions)
|
Mostly replicates topiary.build_epitope_collection_from_binding_predictions
Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset
in order to figure out whether a variant is in the peptide because it only has the variant's
offset into the full protein; but isovar gives us the variant's offset into the protein subsequence
(dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer
peptides generated by mhctools.
|
def _set_people(self, people):
if hasattr(people, "object_type"):
people = [people]
elif hasattr(people, "__iter__"):
people = list(people)
return people
|
Sets who the object is sent to
|
def ensure_proc_terminate(proc):
if isinstance(proc, list):
for p in proc:
ensure_proc_terminate(p)
return
def stop_proc_by_weak_ref(ref):
proc = ref()
if proc is None:
return
if not proc.is_alive():
return
proc.terminate()
proc.join()
assert isinstance(proc, mp.Process)
atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))
|
Make sure processes terminate when the main process exits.
Args:
proc (multiprocessing.Process or list)
|
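A hedged usage sketch for ensure_proc_terminate above, assuming the function and its multiprocessing/atexit/weakref imports are in scope; the worker function here is made up.

import multiprocessing as mp
import time

def _worker():
    # Deliberately never returns, so cleanup at interpreter exit is needed.
    while True:
        time.sleep(1)

if __name__ == '__main__':
    p = mp.Process(target=_worker)
    p.start()
    ensure_proc_terminate(p)   # registers an atexit hook holding only a weak ref
    time.sleep(0.2)            # main process exits here; the hook terminates _worker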
def _compute_gradients(self, loss_fn, x, unused_optim_state):
assert len(x) == 1 and isinstance(x, list), \
'x should be a list and contain only one image tensor'
x = x[0]
loss = reduce_mean(loss_fn(x), axis=0)
return tf.gradients(loss, x)
|
Compute a new value of `x` to minimize `loss_fn`.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns
a batch of loss values. `x` will be optimized to minimize
`loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous
to the `var_list` argument in standard TF Optimizer.
unused_optim_state: A (possibly nested) dict, containing any state
info needed for the optimizer.
Returns:
new_x: A list of Tensors, the same length as `x`, which are updated
new_optim_state: A dict, with the same structure as `optim_state`,
which have been updated.
|
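The method above is essentially a thin wrapper around tf.gradients over a batch-averaged loss; the sketch below shows that call pattern, assuming TensorFlow 1.x (or tf.compat.v1) graph mode.

import tensorflow as tf  # assumes TF 1.x graph-mode APIs (placeholder, Session)

x = tf.placeholder(tf.float32, shape=[1, 3])
loss = tf.reduce_mean(tf.reduce_sum(tf.square(x), axis=1), axis=0)
grad, = tf.gradients(loss, [x])   # same reduce-then-differentiate pattern as above

with tf.Session() as sess:
    print(sess.run(grad, feed_dict={x: [[1.0, 2.0, 3.0]]}))   # -> [[2. 4. 6.]]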
def explore(args):
logger.info("reading sequeces")
data = load_data(args.json)
logger.info("get sequences from json")
c1, c2 = args.names.split(",")
seqs, names = get_sequences_from_cluster(c1, c2, data[0])
loci = get_precursors_from_cluster(c1, c2, data[0])
logger.info("map all sequences to all loci")
print("%s" % (loci))
map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args)
logger.info("plot sequences on loci")
logger.info("Done")
|
Create a mapping of the sequences of two clusters
|
def getMaxPacketSize(self, endpoint):
result = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result
|
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
|
def apply(self, func, *args, **kwargs):
self._prep_pandas_groupby()
def key_by_index(data):
for key, row in data.iterrows():
yield (key, pd.DataFrame.from_dict(
dict([(key, row)]), orient='index'))
myargs = self._myargs
mykwargs = self._mykwargs
regroupedRDD = self._distributedRDD.mapValues(
lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
dataframe = self._sortIfNeeded(reKeyedRDD).values()
return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
|
Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame.
|
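The distributed apply above is meant to reproduce pandas' own groupby(...).apply semantics; here is a small local illustration of that target behaviour, using plain pandas with no Spark required.

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})

# Per-group function returning a DataFrame; pandas stitches the pieces back
# together, which is what the RDD-based version emulates with flatMap/values.
out = df.groupby('key').apply(lambda g: g.assign(total=g['val'].sum()))
print(out)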
def gather(input):
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line)
|
Collect contiguous lines of text, preserving line numbers.
|
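gather() assumes an input object exposing next() and push(), plus line records carrying kind/line/number/continued/stripped attributes; below is a minimal sketch of such a pushback wrapper. The class name and protocol are assumptions, not the library's real types.

class PushbackIterator(object):
    # Wraps any iterable and lets one item be pushed back for re-reading,
    # matching the next()/push() calls made by gather() above.
    def __init__(self, iterable):
        self._it = iter(iterable)
        self._stack = []

    def next(self):
        if self._stack:
            return self._stack.pop()
        return next(self._it)

    def push(self, item):
        self._stack.append(item)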
def wait_closed(self):
if self._closed:
return
if not self._closing:
raise RuntimeError(
".wait_closed() should be called "
"after .close()"
)
while self._free:
conn = self._free.popleft()
if not conn.closed:
yield from conn.close()
else:
pass
with (yield from self._cond):
while self.size > self.freesize:
yield from self._cond.wait()
self._used.clear()
self._closed = True
|
Wait for all of the pool's connections to be closed.
|
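The usual teardown order implied by wait_closed() above, sketched with modern async/await syntax and assuming a pool object exposing the same close()/wait_closed() API (aiopg/aiomysql style):

async def shutdown(pool):
    # close() flips the pool into its closing state; wait_closed() then blocks
    # until every acquired connection has been released and closed.
    pool.close()
    await pool.wait_closed()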
def populateFromRow(self, callSetRecord):
self._biosampleId = callSetRecord.biosampleid
self.setAttributesJson(callSetRecord.attributes)
|
Populates this CallSet from the specified DB row.
|
def _make_verb_helper(verb_func, add_groups=False):
@wraps(verb_func)
def _verb_func(verb):
verb.expressions, new_columns = build_expressions(verb)
if add_groups:
verb.groups = new_columns
return verb_func(verb)
return _verb_func
|
Create a function that prepares the verb for the verb function.
The functions created add expressions to be evaluated to
the verb, then call the core verb function.
Parameters
----------
verb_func : function
Core verb function. This is the function called after
the expressions are created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
|
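A standalone sketch of the same wrap-then-delegate pattern used by _make_verb_helper, with made-up names standing in for the verb/expression machinery:

from functools import wraps

def make_helper(core_func, add_marker=False):
    # Pre-process the argument, optionally attach extra state, then delegate
    # to the core function -- the same shape as the helper factory above.
    @wraps(core_func)
    def helper(payload):
        payload = dict(payload, prepared=True)
        if add_marker:
            payload['marker'] = True
        return core_func(payload)
    return helper

def core(payload):
    return sorted(payload)

run = make_helper(core, add_marker=True)
print(run({'x': 1}))   # -> ['marker', 'prepared', 'x']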
def graphHasField(self, graph_name, field_name):
graph = self._graphDict.get(graph_name, True)
return graph.hasField(field_name)
|
Return true if graph with name graph_name has field with
name field_name.
@param graph_name: Graph Name
@param field_name: Field Name.
@return: Boolean
|
def update(self, events, time_passed=None):
for e in events:
if e.type == pygame.QUIT:
raise SystemExit
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
raise SystemExit
if e.key == pygame.K_DOWN:
self.option += 1
if e.key == pygame.K_UP:
self.option -= 1
if e.key == pygame.K_RETURN or e.key == pygame.K_SPACE:
self.options[self.option]['callable']()
elif e.type == pygame.MOUSEBUTTONDOWN:
lb, cb, rb = pygame.mouse.get_pressed()
if lb:
self.options[self.option]['callable']()
if self.option > len(self.options) - 1:
self.option = len(self.options) - 1
elif self.option < 0:
self.option = 0
if self.mouse_enabled:
self._checkMousePositionForFocus()
if time_passed:
self._updateEffects(time_passed)
|
Update the menu and get input for the menu.
@events: the pygame events that were caught
@time_passed: delta time since the last call
|