code | docstring
---|---|
def get_layers(self, class_: Type[L], became: bool=True) -> List[L]:
out = list(self._index.get(class_, []))  # copy so the += below cannot mutate the index
if became:
out += self._transformed.get(class_, [])
return out
|
Returns the list of layers of a given class. If no layers are present
then the list will be empty.
:param class_: class of the expected layers
:param became: Allow transformed layers in results
|
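A minimal, self-contained usage sketch; `Model`, `Conv`, and `Dense` are hypothetical stand-ins for the indexed/transformed container the method above belongs to:

from typing import Dict, List

class Conv: ...
class Dense: ...

class Model:
    def __init__(self):
        self._index: Dict[type, List] = {Conv: [Conv()]}
        self._transformed: Dict[type, List] = {Conv: [Conv()]}
    def get_layers(self, class_, became=True):
        out = list(self._index.get(class_, []))  # copy to avoid mutating the index
        if became:
            out += self._transformed.get(class_, [])
        return out

m = Model()
print(len(m.get_layers(Conv)))                # 2: indexed + transformed
print(len(m.get_layers(Conv, became=False)))  # 1: indexed only
print(m.get_layers(Dense))                    # []
|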
def rm(name, filenames, recursive):
with Session() as session:
try:
if input("> Are you sure? (y/n): ").lower().strip()[:1] == 'y':
session.VFolder(name).delete_files(
filenames,
recursive=recursive)
print_done('Done.')
except Exception as e:
print_error(e)
sys.exit(1)
|
Delete files in a virtual folder.
If one of the given paths is a directory and the recursive option is enabled,
all its content and the directory itself are recursively deleted.
This operation is irreversible!
\b
NAME: Name of a virtual folder.
FILENAMES: Paths of the files to delete.
|
def get(self, path, payload=None, headers=None):
return self._request('get', path, payload, headers)
|
HTTP GET operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raised if the remote server encountered an error.
:raises ApiConnectionError: Raised if there was a connectivity issue.
:return: Response
|
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
|
Create all the covariance matrices from a given template.
|
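A quick sketch of the shapes this produces, assuming the function above and NumPy are in scope:

import numpy as np

tied_cv = np.array([[2.0, 0.5],
                    [0.5, 1.0]])

# 'diag' keeps one diagonal vector per component: shape (3, 2)
print(distribute_covar_matrix_to_match_covariance_type(tied_cv, 'diag', 3).shape)
# 'full' repeats the full matrix per component: shape (3, 2, 2)
print(distribute_covar_matrix_to_match_covariance_type(tied_cv, 'full', 3).shape)
|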
def build(self):
return Intent(self.name, self.requires, self.at_least_one, self.optional)
|
Constructs an intent from the builder's specifications.
:return: an Intent instance.
|
def _is_reachable(self, server):
try:
server['redis'].ping()
return True
except Exception:
self._logger.warn('Cannot reach redis server: ' + server['url'])
return False
|
Checks if the given redis server is reachable
|
def validate_regex(regex, sub_service):
if regex.pattern.startswith('^') is False:
logger.debug('StackInABoxService: Pattern must start with ^')
raise InvalidRouteRegexError('Pattern must start with ^')
if regex.pattern.endswith('$') is False and sub_service is False:
logger.debug('StackInABoxService: Pattern must end with $')
raise InvalidRouteRegexError('Pattern must end with $')
if regex.pattern.endswith('$') is True and sub_service is True:
logger.debug(
'StackInABoxService: Sub-Service RegEx Pattern must not '
'end with $')
raise InvalidRouteRegexError('Pattern must not end with $')
|
Is the regex valid for StackInABox routing?
:param regex: Python regex object to match the URI
:param sub_service: boolean for whether or not the regex is for
a sub-service
:raises: InvalidRouteRegexError if the regex does not meet the
requirements.
|
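A short usage sketch, assuming the function and `InvalidRouteRegexError` above are in scope:

import re

validate_regex(re.compile('^/widgets$'), sub_service=False)  # valid full route
validate_regex(re.compile('^/widgets'), sub_service=True)    # valid sub-service route
# validate_regex(re.compile('/widgets$'), sub_service=False) raises
# InvalidRouteRegexError: the pattern must start with ^
|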
def aln2logodata(aln):
handle = StringIO(aln.format('fasta'))
logodata = read_logodata(handle)
handle.close()
return logodata
|
Get weblogo data for an alignment object.
Returns a list of tuples: (posn, letter_counts, entropy, weight)
|
def enable(logger=logger,
level=logging.INFO,
format=DETAIL_LOG_FORMAT,
echo=True):
global _handler
if _handler is None:
_handler = logging.StreamHandler()
formatter = logging.Formatter(format)
_handler.setFormatter(formatter)
level = logging._checkLevel(level)
levelName = logging._levelToName[level]
logger.setLevel(level)
_handler.setLevel(level)
if _handler not in logger.handlers:
logger.addHandler(_handler)
if echo:
logger.log(
level, 'Logging enabled at level {name}.'.format(name=levelName))
|
Enable simple console logging for this module
|
def get_vmss(access_token, subscription_id, resource_group, vmss_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
Get virtual machine scale set details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of scale set properties.
|
def _iter_all_paths(start, end, rand=False, path=tuple()):
path = path + (start, )
if start is end:
yield path
else:
nodes = [start.lo, start.hi]
if rand:
random.shuffle(nodes)
for node in nodes:
if node is not None:
yield from _iter_all_paths(node, end, rand, path)
|
Iterate through all paths from start to end.
|
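A minimal sketch using `types.SimpleNamespace` as hypothetical BDD-style nodes with `lo`/`hi` children, assuming `_iter_all_paths` above is in scope:

from types import SimpleNamespace

leaf = SimpleNamespace(lo=None, hi=None)
mid = SimpleNamespace(lo=leaf, hi=None)
root = SimpleNamespace(lo=leaf, hi=mid)

for path in _iter_all_paths(root, leaf):
    print(len(path))  # 2 for root->leaf, 3 for root->mid->leaf
|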
def lines(self) -> str:
if self.definition is None:
return ''
source = ''
lines = self.definition.source
offset = self.definition.start
lines_stripped = list(reversed(list(dropwhile(is_blank,
reversed(lines)))))
numbers_width = len(str(offset + len(lines_stripped)))
line_format = '{{:{}}}:{{}}'.format(numbers_width)
for n, line in enumerate(lines_stripped):
if line:
line = ' ' + line
source += line_format.format(n + offset, line)
if n > 5:
source += ' ...\n'
break
return source
|
Return the source code lines for this error.
|
def gfrefn(t1, t2, s1, s2):
t1 = ctypes.c_double(t1)
t2 = ctypes.c_double(t2)
s1 = ctypes.c_int(s1)
s2 = ctypes.c_int(s2)
t = ctypes.c_double()
libspice.gfrefn_c(t1, t2, s1, s2, ctypes.byref(t))
return t.value
|
For those times when we can't do better, we use a bisection
method to find the next time at which to test for state change.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfrefn_c.html
:param t1: One of two values bracketing a state change.
:type t1: float
:param t2: The other value that brackets a state change.
:type t2: float
:param s1: State at t1.
:type s1: bool
:param s2: State at t2.
:type s2: bool
:return: New value at which to check for transition.
:rtype: float
|
def shorted(text, width=79):
if len(text) <= width:
return text
return u"{0}...".format(re.sub(r"\W+\w*$", "", text[:width - 2]))
|
Shorten text, making sure it's not cut in the middle of a word.
|
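For example (assuming `shorted` and the `re` import from above are in scope):

print(shorted("The quick brown fox jumps over the lazy dog", width=20))
# -> 'The quick brown...'  (truncated at a word boundary, then ellipsis added)
print(shorted("short", width=20))
# -> 'short'  (returned unchanged when it already fits)
|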
def on_click(self, button, **kwargs):
actions = ['leftclick', 'middleclick', 'rightclick',
'upscroll', 'downscroll']
try:
action = actions[button - 1]
except (TypeError, IndexError):
self.__log_button_event(button, None, None, "Other button")
action = "otherclick"
m_click = self.__multi_click
with m_click.lock:
double = m_click.check_double(button)
double_action = 'double%s' % action
if double:
action = double_action
cb = getattr(self, 'on_%s' % action, None)
double_handler = getattr(self, 'on_%s' % double_action, None)
delay_execution = (not double and double_handler)
if delay_execution:
m_click.set_timer(button, cb, **kwargs)
else:
self.__button_callback_handler(button, cb, **kwargs)
|
Maps a click event with its associated callback.
Currently implemented events are:
============ ================ =========
Event Callback setting Button ID
============ ================ =========
Left click on_leftclick 1
Middle click on_middleclick 2
Right click on_rightclick 3
Scroll up on_upscroll 4
Scroll down on_downscroll 5
Others on_otherclick > 5
============ ================ =========
The action is determined by the nature (type and value) of the callback
setting in the following order:
1. If null callback (``None``), no action is taken.
2. If it's a `python function`, call it and pass any additional
arguments.
3. If it's the name of a `member method` of the current module (string),
call it and pass any additional arguments.
4. If the name does not match a `member method` name, execute an external
program with that name.
.. seealso:: :ref:`callbacks` for more information about
callback settings and examples.
:param button: The ID of button event received from i3bar.
:param kwargs: Further information received from i3bar like the
positions of the mouse where the click occurred.
:return: Returns ``True`` if a valid callback action was executed.
``False`` otherwise.
|
def fastMean(img, f=10, inplace=False):
s0,s1 = img.shape[:2]
ss0 = int(round(s0/f))
ss1 = int(round(s1/f))
small = cv2.resize(img,(ss1,ss0), interpolation=cv2.INTER_AREA)
k = {'interpolation':cv2.INTER_LINEAR}
if inplace:
k['dst']=img
return cv2.resize(small,(s1,s0), **k)
|
For bigger kernel sizes it is often faster to resize an image
rather than blur it...
|
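A usage sketch, assuming the function above plus `cv2` and NumPy are available:

import numpy as np
import cv2

img = (np.random.rand(480, 640) * 255).astype(np.uint8)
blurred = fastMean(img, f=10)
print(blurred.shape)  # (480, 640): smoothed by averaging at ~1/10 resolution
|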
def deploy():
_require_root()
if not confirm("This will apply any available migrations to the database. Has the database been backed up?"):
abort("Aborted.")
if not confirm("Are you sure you want to deploy?"):
abort("Aborted.")
with lcd(PRODUCTION_DOCUMENT_ROOT):
with shell_env(PRODUCTION="TRUE"):
local("git pull")
with open("requirements.txt", "r") as req_file:
requirements = req_file.read().strip().split()
try:
pkg_resources.require(requirements)
except pkg_resources.DistributionNotFound:
local("pip install -r requirements.txt")
except Exception:
puts(traceback.format_exc())
local("pip install -r requirements.txt")
else:
puts("Python requirements already satisfied.")
with prefix("source /usr/local/virtualenvs/ion/bin/activate"):
local("./manage.py collectstatic --noinput", shell="/bin/bash")
local("./manage.py migrate", shell="/bin/bash")
restart_production_gunicorn(skip=True)
puts("Deploy complete.")
|
Deploy to production.
|
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use):
res_x = str(args.resX)
res_y = str(args.resY)
if program_to_use == "Ghostscript":
if ex.system_os == "Windows":
ex.render_pdf_file_to_image_files__ghostscript_bmp(
pdf_file_name, output_filename_root, res_x, res_y)
else:
ex.render_pdf_file_to_image_files__ghostscript_png(
pdf_file_name, output_filename_root, res_x, res_y)
elif program_to_use == "pdftoppm":
use_gray = False
if use_gray:
ex.render_pdf_file_to_image_files_pdftoppm_pgm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
ex.render_pdf_file_to_image_files_pdftoppm_ppm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
print("Error in renderPdfFileToImageFile: Unrecognized external program.",
file=sys.stderr)
ex.cleanup_and_exit(1)
|
Render all the pages of the PDF file at pdf_file_name to image files with
path and filename prefix given by output_filename_root. Any directories must
have already been created, and the calling program is responsible for
deleting any directories or image files. The program program_to_use,
currently either the string "pdftoppm" or the string "Ghostscript", will be
called externally. The image type that the PDF is converted into must be
directly openable by PIL.
|
def sample(self, cursor):
count = cursor.count()
if count == 0:
self._empty = True
raise ValueError("Empty collection")
if self.p >= 1 and self.max_items <= 0:
for item in cursor:
yield item
return
if self.max_items <= 0:
n_target = max(self.min_items, self.p * count)
else:
if self.p <= 0:
n_target = max(self.min_items, self.max_items)
else:
n_target = max(self.min_items, min(self.max_items, self.p * count))
if n_target == 0:
raise ValueError("No items requested")
n = 0
while n < n_target:
try:
item = next(cursor)
except StopIteration:
cursor.rewind()
item = next(cursor)
if self._keep():
yield item
n += 1
|
Extract records randomly from the database.
Continue until the target proportion of the items have been
extracted, or until `min_items` if this is larger.
If `max_items` is positive, do not extract more than `max_items` items.
This function is a generator, yielding items incrementally.
:param cursor: Cursor to sample
:type cursor: pymongo.cursor.Cursor
:return: yields each item
:rtype: dict
:raise: ValueError, if max_items is valid and less than `min_items`
or if target collection is empty
|
def from_binary(cls,pst,filename):
m = Matrix.from_binary(filename)
return ObservationEnsemble(data=m.x,pst=pst, index=m.row_names)
|
Instantiate an observation ensemble from a jco-type file.
Parameters
----------
pst : pyemu.Pst
a Pst instance
filename : str
the binary file name
Returns
-------
oe : ObservationEnsemble
|
def get_default_ENV(env):
global default_ENV
try:
return env['ENV']
except KeyError:
if not default_ENV:
import SCons.Environment
default_ENV = SCons.Environment.Environment()['ENV']
return default_ENV
|
A fiddlin' little function that has an 'import SCons.Environment' which
can't be moved to the top level without creating an import loop. Since
this import creates a local variable named 'SCons', it blocks access to
the global variable, so we move it here to prevent complaints about local
variables being used uninitialized.
|
def get_gradebook_column_form_for_create(self, gradebook_column_record_types):
for arg in gradebook_column_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if gradebook_column_record_types == []:
obj_form = objects.GradebookColumnForm(
gradebook_id=self._catalog_id,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
else:
obj_form = objects.GradebookColumnForm(
gradebook_id=self._catalog_id,
record_types=gradebook_column_record_types,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
|
Gets the gradebook column form for creating new gradebook columns.
A new form should be requested for each create transaction.
arg: gradebook_column_record_types (osid.type.Type[]): array
of gradebook column record types
return: (osid.grading.GradebookColumnForm) - the gradebook
column form
raise: NullArgument - ``gradebook_column_record_types`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
|
def spellcheck(contents, technical_terms=None, spellcheck_cache=None):
contents = spelling.filter_nonspellcheckable_tokens(contents)
contents = _filter_disabled_regions(contents)
lines = contents.splitlines(True)
user_words, valid_words = valid_words_dictionary.create(spellcheck_cache)
technical_words = technical_words_dictionary.create(technical_terms,
spellcheck_cache)
return sorted(spellcheck_region(lines,
valid_words,
technical_words,
user_words))
|
Run spellcheck on the contents of a file.
:technical_terms: is a path to a file containing a list of "technical"
terms. These may be symbols as collected from files by using
the generic linter or other such symbols. If a symbol-like term is
used within contents and it does not appear in :technical_terms: then
an error will result.
:spellcheck_cache: is a path to a directory where graph files generated
by the spellchecking engine should be stored. It is used for caching
purposes between invocations, since generating the spellchecking
graph is an expensive operation which can take a few seconds to complete.
|
def parse(server, data, initpath):
STREAMCLS = {1: VideoStream, 2: AudioStream, 3: SubtitleStream}
stype = cast(int, data.attrib.get('streamType'))
cls = STREAMCLS.get(stype, MediaPartStream)
return cls(server, data, initpath)
|
Factory method returns a new MediaPartStream from xml data.
|
def validate_full_name(self, full_name, timeout=-1):
uri = self.URI + '/validateUserName/' + full_name
return self._client.create_with_zero_body(uri=uri, timeout=timeout)
|
Verifies if a fullName is already in use.
Args:
full_name:
The fullName to be verified.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns: True if full name is in use, False if it is not.
|
def make_default(spec):
doc = {}
for key, field in spec.iteritems():
if field.default is not no_default:
doc[key] = field.default
return doc
|
Create an empty document that follows spec. Any field with a default
will take that value, required or not. Required fields with no default
will get a value of None. If your default value does not match your
type or otherwise customized Field class, this can create a spec that
fails validation.
|
def paths(self, value):
if value is None:
value = ()
elif isinstance(value, string_types):
value = (value, )
self._paths = tuple(value)
if self.autoconf:
self.applyconfiguration()
|
Change the paths, adding them to the watching list.
|
def delete_record(self, record):
self.children.remove(record.resource)
record.delete()
|
Remove a DNSRecord
Args:
record (:obj:`DNSRecord`): :obj:`DNSRecord` to remove
Returns:
`None`
|
def group_paragraphs(indent_paragraphs):
root = Node(0, [], None)
current_node = root
previous_indent = -1
for indent, lines in indent_paragraphs:
if indent > previous_indent:
current_node = create_child_node(current_node, indent, lines)
elif indent == previous_indent:
current_node = create_sibling_node(current_node, indent, lines)
elif indent < previous_indent:
current_node = create_uncle_node(current_node, indent, lines)
previous_indent = indent
return root
|
Group paragraphs so that more indented paragraphs become children of less
indented paragraphs.
|
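A runnable sketch with minimal hypothetical stand-ins for `Node` and the three `create_*` helpers (their real definitions are not shown here, so the semantics below are assumed); it runs when pasted together with the function above:

class Node:
    def __init__(self, indent, lines, parent):
        self.indent, self.lines, self.parent = indent, lines, parent
        self.children = []

def create_child_node(parent, indent, lines):
    node = Node(indent, lines, parent)
    parent.children.append(node)
    return node

def create_sibling_node(current, indent, lines):
    return create_child_node(current.parent, indent, lines)

def create_uncle_node(current, indent, lines):
    ancestor = current.parent
    while ancestor.parent is not None and ancestor.indent >= indent:
        ancestor = ancestor.parent
    return create_child_node(ancestor, indent, lines)

root = group_paragraphs([(0, ['intro']), (4, ['detail']), (0, ['outro'])])
print([child.lines for child in root.children])  # [['intro'], ['outro']]
print(root.children[0].children[0].lines)        # ['detail']
|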
def gen_array_crud():
if not WORK_BOOK:
return False
papa_id = 0
switch_dics = {}
kind_dics = {}
for work_sheet in WORK_BOOK:
kind_sig = str(work_sheet['A1'].value).strip()
for row_num in range(3, 1000):
a_cell_value = work_sheet['A{0}'.format(row_num)].value
b_cell_val = work_sheet['B{0}'.format(row_num)].value
if a_cell_value or b_cell_val:
pass
else:
break
if a_cell_value and a_cell_value != '':
papa_id = a_cell_value.strip()[1:]
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}00'.format(papa_id)] = u_dic
kind_dics['kind_{0}00'.format(papa_id)] = kind_sig
if b_cell_val and b_cell_val != '':
sun_id = b_cell_val.strip()[1:]
if len(sun_id) == 4:
app_uid = sun_id
else:
app_uid = '{0}{1}'.format(papa_id, sun_id)
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}'.format(app_uid)] = u_dic
kind_dics['kind_{0}'.format(app_uid)] = kind_sig
return (switch_dics, kind_dics)
|
Return the dictionaries of the switcher from the XLSX file.
If the value of the column of the row is `1`, it will be added to the array.
|
def compute_discounts(self, precision=None):
return sum([line.compute_discounts(precision) for line in self.__lines])
|
Returns the total amount of discounts of this group.
@param precision: int Precision used to compute the discounts
@return: Decimal
|
def import_price(self, price: PriceModel):
symbol = price.symbol
if "." in symbol:
symbol = price.symbol.split(".")[0]
stock = SecuritiesAggregate(self.book).get_by_symbol(symbol)
if stock is None:
logging.warning("security %s not found in book.", price.symbol)
return False
existing_prices = stock.prices.filter(Price.date == price.datetime.date()).all()
if not existing_prices:
self.__create_price_for(stock, price)
else:
logging.warning("price already exists for %s on %s",
stock.mnemonic, price.datetime.strftime("%Y-%m-%d"))
existing_price = existing_prices[0]
existing_price.value = price.value
return True
|
Import individual price
|
def _call(self, x, out=None):
if out is None:
out = self.range.element()
ndim = self.range.ndim
dx = self.range.cell_sides
tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)
with writable_array(out) as out_arr:
for axis in range(ndim):
finite_diff(x[axis], axis=axis, dx=dx[axis],
method=self.method, pad_mode=self.pad_mode,
pad_const=self.pad_const,
out=tmp)
if axis == 0:
out_arr[:] = tmp
else:
out_arr += tmp
return out
|
Calculate the divergence of ``x``.
|
def get_bins(self):
catalogs = self._get_provider_session('bin_lookup_session').get_bins()
cat_list = []
for cat in catalogs:
cat_list.append(Bin(self._provider_manager, cat, self._runtime, self._proxy))
return BinList(cat_list)
|
Pass through to provider BinLookupSession.get_bins
|
def parse_keyring(self, namespace=None):
results = {}
if not keyring:
return results
if not namespace:
namespace = self.prog
for option in self._options:
secret = keyring.get_password(namespace, option.name)
if secret:
results[option.dest] = option.type(secret)
return results
|
Find settings from keyring.
|
def dense(*elements):
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
elements = elements[0]
return DenseVector(elements)
|
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
|
def product_metadata(product, dst_folder, counter=None, writers=[file_writer], geometry_check=None):
if not counter:
counter = {
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com'
product_meta_link = '{0}/{1}'.format(s3_url, product['metadata'])
product_info = requests.get(product_meta_link, stream=True)
product_metadata = metadata_to_dict(product_info.raw)
product_metadata['product_meta_link'] = product_meta_link
counter['products'] += 1
for tile in product['tiles']:
tile_info = requests.get('{0}/{1}'.format(s3_url, tile))
try:
metadata = tile_metadata(tile_info.json(), copy(product_metadata), geometry_check)
for w in writers:
w(dst_folder, metadata)
logger.info('Saving to disk: %s' % metadata['tile_name'])
counter['saved_tiles'] += 1
except JSONDecodeError:
logger.warning('Tile: %s was not found and skipped' % tile)
counter['skipped_tiles'] += 1
counter['skipped_tiles_paths'].append(tile)
return counter
|
Extract metadata for a specific product
|
async def handle_http_exception(self, error: Exception) -> Response:
handler = self._find_exception_handler(error)
if handler is None:
return error.get_response()
else:
return await handler(error)
|
Handle an HTTPException subclass error.
This will attempt to find a handler for the error and, if that
fails, fall back to the error's default response.
|
def unit(self, parameter):
"Get the unit for given parameter"
parameter = self._get_parameter_name(parameter).lower()
return self._parameters[parameter]['Unit']
|
Get the unit for given parameter
|
def accept(self):
newsock, addr = socket.accept(self)
ssl_sock = SSLSocket(newsock._sock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
ciphers=self.ciphers)
return ssl_sock, addr
|
Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client.
|
def generate(self):
x, y, z = self.point1
return (x + self.size_x * random(),
y + self.size_y * random(),
z + self.size_z * random())
|
Return a random point inside the box
|
def FindMessageTypeByName(self, full_name):
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._descriptors[full_name]
|
Loads the named descriptor from the pool.
Args:
full_name: The full name of the descriptor to load.
Returns:
The descriptor for the named type.
Raises:
KeyError: if the message cannot be found in the pool.
|
def get_bookmarks(self, folder='unread', limit=25, have=None):
path = 'bookmarks/list'
params = {'folder_id': folder, 'limit': limit}
if have:
have_concat = ','.join(str(id_) for id_ in have)
params['have'] = have_concat
response = self.request(path, params)
items = response['data']
bookmarks = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'bookmark':
bookmarks.append(Bookmark(self, **item))
return bookmarks
|
Return list of user's bookmarks.
:param str folder: Optional. Possible values are unread (default),
starred, archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results
:returns: List of user's bookmarks
:rtype: list
|
def namespace(self, elem=None):
if elem is None:
elem = self.root
return XML.tag_namespace(elem.tag)
|
Return the namespace URL, if any, of the doc root, or of elem if given.
|
def diff(self, dt=None, abs=True):
if dt is None:
dt = self.today()
return Period(self, Date(dt.year, dt.month, dt.day), absolute=abs)
|
Returns the difference between two Date objects as a Period.
:type dt: Date or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
|
def MultiOpen(self, urns, aff4_type=None, mode="r"):
not_opened_urns = []
_ValidateAFF4Type(aff4_type)
for urn in urns:
key = self._ObjectKey(urn, mode)
try:
result = self._objects_cache[key]
if aff4_type is not None and not isinstance(result, aff4_type):
continue
yield result
except KeyError:
not_opened_urns.append(urn)
if not_opened_urns:
for obj in FACTORY.MultiOpen(
not_opened_urns, follow_symlinks=False, mode=mode, token=self._token):
key = self._ObjectKey(obj.urn, mode)
self._objects_cache[key] = obj
if aff4_type is not None and not isinstance(obj, aff4_type):
continue
yield obj
|
Opens many urns efficiently, returning cached objects when possible.
|
def _run_env(self):
env = dict(os.environ)
env.update(
getattr(self, 'env', {}),
PYTHONUSERBASE=self.env_path,
PIP_USER="1",
)
self._disable_venv(env)
return env
|
Augment the current environment providing the PYTHONUSERBASE.
|
def cast(self, value, custom_formatters=None, strict=True):
if value is None:
if not self.nullable:
raise InvalidSchemaValue("Null value for non-nullable schema", value, self.type)
return self.default
cast_mapping = self.get_cast_mapping(
custom_formatters=custom_formatters, strict=strict)
if self.type is not SchemaType.STRING and value == '':
return None
cast_callable = cast_mapping[self.type]
try:
return cast_callable(value)
except ValueError:
raise InvalidSchemaValue(
"Failed to cast value {value} to type {type}", value, self.type)
|
Cast value to schema type
|
def render(self, rectangle, data):
size = (1.0 - 2.0*self.margin) * rectangle.h
offset = self.margin * rectangle.h
per_mark = 1.0 / float(self.total)
bottom = offset + size * float(self.index) * per_mark
top = bottom + per_mark * size
c = data['output']
with c:
c.translate(rectangle.x, rectangle.y)
c.draw_polygon(
0, top, -self.width, bottom, self.width, bottom,
fill=self.color
)
|
Draws the signature mark.
Note that this draws OUTSIDE the rectangle we're given. If
cropping is involved, then this obviously won't work.
|
def to_glyphs_glyph_anchors(self, ufo_glyph, layer):
for ufo_anchor in ufo_glyph.anchors:
anchor = self.glyphs_module.GSAnchor()
anchor.name = ufo_anchor.name
anchor.position = Point(ufo_anchor.x, ufo_anchor.y)
layer.anchors.append(anchor)
|
Add UFO glif anchors to a GSLayer.
|
def RaiseIfLastError(result, func = None, arguments = ()):
code = GetLastError()
if code != ERROR_SUCCESS:
raise ctypes.WinError(code)
return result
|
Error checking for Win32 API calls with no error-specific return value.
Regardless of the return value, the function calls GetLastError(). If the
code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised.
For this to work, the user MUST call SetLastError(ERROR_SUCCESS) prior to
calling the API. Otherwise an exception may be raised even on success,
since most API calls don't clear the error status code.
|
def _rescale_to_unit_diagonals(mat):
d = np.sqrt(np.diag(mat))
mat /= d
mat /= d[:, np.newaxis]
return mat
|
Rescale matrix to have unit diagonals.
Note: Call only after diagonal dominance is ensured.
|
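For example (assuming the function above and NumPy are in scope; the rescaling is in place, so pass a copy if the original is needed):

import numpy as np

mat = np.array([[4.0, 2.0],
                [2.0, 9.0]])
print(_rescale_to_unit_diagonals(mat.copy()))
# diagonals become 1.0; off-diagonals become correlations, here 2/(2*3) = 0.333...
|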
def encode(cls, value):
if value not in [True, False]:
raise InvalidValue('not a boolean')
return b'1' if value else b''
|
convert a boolean value into something we can persist to redis.
An empty string is the representation for False.
:param value: bool
:return: bytes
|
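A round-trip sketch; `BooleanField` is a hypothetical host class whose `encode` mirrors the classmethod above:

class InvalidValue(Exception):
    pass

class BooleanField:  # hypothetical host class for the classmethod above
    @classmethod
    def encode(cls, value):
        if value not in [True, False]:
            raise InvalidValue('not a boolean')
        return b'1' if value else b''

print(BooleanField.encode(True))         # b'1'
print(BooleanField.encode(False))        # b''
print(bool(BooleanField.encode(False)))  # False: empty bytes decode via truthiness
|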
def _list2array(lst):
if lst and isinstance(lst[0], cp.ndarray):
return cp.hstack(lst)
else:
return cp.asarray(lst)
|
Convert a list to a `cp` (CuPy/NumPy-style) array.
|
def read_targets(targets):
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
|
Reads generic key-value pairs from input files
|
async def seek(self, position: int):
if self.current.seekable:
position = max(min(position, self.current.length), 0)
await self.node.seek(self.channel.guild.id, position)
|
If the track allows it, seeks to a position.
Parameters
----------
position : int
Between 0 and track length.
|
def _set_worker_thread_level(self):
bthread_logger = logging.getLogger(
'google.cloud.logging.handlers.transports.background_thread')
if self.debug_thread_worker:
bthread_logger.setLevel(logging.DEBUG)
else:
bthread_logger.setLevel(logging.INFO)
|
Sets logging level of the background logging thread to DEBUG or INFO
|
def can_create_assets(self):
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['assetHints']['canCreate']
|
Tests if this user can create ``Assets``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an ``Asset``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer create
operations to an unauthorized user.
:return: ``false`` if ``Asset`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
|
def GetIPAddresses(self):
results = []
for address in self.addresses:
human_readable_address = address.human_readable_address
if human_readable_address is not None:
results.append(human_readable_address)
return results
|
Return a list of IP addresses.
|
def get_uses_implied_permission_list(self):
target_sdk_version = self.get_effective_target_sdk_version()
READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
READ_CONTACTS = 'android.permission.READ_CONTACTS'
READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
implied = []
implied_WRITE_EXTERNAL_STORAGE = False
if target_sdk_version < 4:
if WRITE_EXTERNAL_STORAGE not in self.permissions:
implied.append([WRITE_EXTERNAL_STORAGE, None])
implied_WRITE_EXTERNAL_STORAGE = True
if READ_PHONE_STATE not in self.permissions:
implied.append([READ_PHONE_STATE, None])
if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
and READ_EXTERNAL_STORAGE not in self.permissions:
maxSdkVersion = None
for name, version in self.uses_permissions:
if name == WRITE_EXTERNAL_STORAGE:
maxSdkVersion = version
break
implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])
if target_sdk_version < 16:
if READ_CONTACTS in self.permissions \
and READ_CALL_LOG not in self.permissions:
implied.append([READ_CALL_LOG, None])
if WRITE_CONTACTS in self.permissions \
and WRITE_CALL_LOG not in self.permissions:
implied.append([WRITE_CALL_LOG, None])
return implied
|
Return all permissions implied by the target SDK or other permissions.
:rtype: list of string
|
def internal_link_sets(self):
if not self.__internal_link_sets:
self.__internal_link_sets = InternalLinkSets(self.__connection)
return self.__internal_link_sets
|
Gets the InternalLinkSets API client.
Returns:
InternalLinkSets:
|
def lookup(sock, domain, cache = None):
domain = normalize_domain(domain)
reply = sam_cmd(sock, "NAMING LOOKUP NAME=%s" % domain)
b64_dest = reply.get('VALUE')
if b64_dest:
dest = Dest(b64_dest, encoding='base64')
if cache:
cache[dest.base32 + '.b32.i2p'] = dest
return dest
else:
raise NSError('Domain name %r not resolved because %r' % (domain, reply))
|
Look up an I2P domain name, returning a Destination instance.
|
def _download_ontology(url, local_file):
current_app.logger.debug(
"Copying remote ontology '%s' to file '%s'." % (url, local_file)
)
try:
request = requests.get(url, stream=True)
if request.status_code == 200:
with open(local_file, 'wb') as f:
for chunk in request.iter_content(chunk_size):
f.write(chunk)
except IOError as e:
current_app.logger.exception(e)
return False
else:
current_app.logger.debug("Done copying.")
return True
|
Download the ontology and store it in CLASSIFIER_WORKDIR.
|
def _CheckLocation(self, file_entry, search_depth):
if self._location_segments is None:
return False
if search_depth < 0 or search_depth > self._number_of_location_segments:
return False
if search_depth == 0:
segment_name = ''
else:
segment_name = self._location_segments[search_depth - 1]
if self._is_regex:
if isinstance(segment_name, py2to3.STRING_TYPES):
flags = re.DOTALL | re.UNICODE
if not self._is_case_sensitive:
flags |= re.IGNORECASE
try:
segment_name = r'^{0:s}$'.format(segment_name)
segment_name = re.compile(segment_name, flags=flags)
except sre_constants.error:
return False
self._location_segments[search_depth - 1] = segment_name
elif not self._is_case_sensitive:
segment_name = segment_name.lower()
self._location_segments[search_depth - 1] = segment_name
if search_depth > 0:
if self._is_regex:
if not segment_name.match(file_entry.name):
return False
elif self._is_case_sensitive:
if segment_name != file_entry.name:
return False
elif segment_name != file_entry.name.lower():
return False
return True
|
Checks the location find specification.
Args:
file_entry (FileEntry): file entry.
search_depth (int): number of location path segments to compare.
Returns:
bool: True if the file entry matches the find specification, False if not.
|
def get_user_stats(self, name):
req = self.conn.get(BASE_URL + "/user/" + name)
if req.status_code != 200 or not name:
return None
return self.conn.make_api_call("getUserInfo", {"name": name})
|
Return data about the given user. Returns None if user
does not exist.
|
def toString(self):
string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % (
self.name, self.kind, self.size, self.active, self.frozen)
if (self.type == 'Output'):
string += toStringArray('Target ', self.target, self.displayWidth)
string += toStringArray('Activation', self.activation, self.displayWidth)
if (self.type != 'Input' and self._verbosity > 1):
string += toStringArray('Error ', self.error, self.displayWidth)
if (self._verbosity > 4 and self.type != 'Input'):
string += toStringArray('weight ', self.weight, self.displayWidth)
string += toStringArray('dweight ', self.dweight, self.displayWidth)
string += toStringArray('delta ', self.delta, self.displayWidth)
string += toStringArray('netinput ', self.netinput, self.displayWidth)
string += toStringArray('wed ', self.wed, self.displayWidth)
return string
|
Returns a string representation of Layer instance.
|
def once(self, event, callback):
'Define a callback to handle the first event emitted by the server'
self._once_events.add(event)
self.on(event, callback)
|
Define a callback to handle the first event emitted by the server
|
def register(self, newitems, *args, **kwargs):
newkeys = newitems.viewkeys()
if any(self.viewkeys() & newkeys):
raise DuplicateRegItemError(self.viewkeys() & newkeys)
self.update(newitems)
kwargs.update(zip(self.meta_names, args))
for k, v in kwargs.iteritems():
meta = getattr(self, k)
if v:
if not v.viewkeys() <= newkeys:
raise MismatchRegMetaKeysError(newkeys - v.viewkeys())
meta.update(v)
|
Register newitems in registry.
:param newitems: New items to add to registry. When registering new
items, keys are not allowed to override existing keys in the
registry.
:type newitems: mapping
:param args: Positional arguments with meta data corresponding to order
of meta names class attributes
:param kwargs: Maps of corresponding meta for new keys. Each set of
meta keys must be a subset of the new item keys.
:raises:
:exc:`~simkit.core.exceptions.DuplicateRegItemError`,
:exc:`~simkit.core.exceptions.MismatchRegMetaKeysError`
|
def term_width():
if fcntl and termios:
try:
winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ')
_, width = struct.unpack('hh', winsize)
return width
except IOError:
pass
elif windll and create_string_buffer:
stderr_handle, struct_size = -12, 22
handle = windll.kernel32.GetStdHandle(stderr_handle)
csbi = create_string_buffer(struct_size)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
if res:
(_, _, _, _, _, left, _, right, _,
_, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
return right - left + 1
else:
return 0
|
Return the column width of the terminal, or ``0``/``None`` if it can't be
determined.
|
def reward_wall(self):
if 'wall' not in self.mode:
return
mode = self.mode['wall']
if mode and self.__test_cond(mode):
self.logger.debug("Wall {x}/{y}'".format(x=self.bumped_x, y=self.bumped_y))
self.player.stats['reward'] += mode['reward']
self.player.game_over = self.player.game_over or mode['terminal']
|
Add a wall collision reward
|
def ocr(img, mrz_mode=True, extra_cmdline_params=''):
input_file_name = '%s.bmp' % _tempnam()
output_file_name_base = '%s' % _tempnam()
output_file_name = "%s.txt" % output_file_name_base
try:
if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1:
img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999
img = img.astype(np.uint8)
imwrite(input_file_name, img)
if mrz_mode:
config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><"
" -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params)
else:
config = "{}".format(extra_cmdline_params)
pytesseract.run_tesseract(input_file_name,
output_file_name_base,
'txt',
lang=None,
config=config)
if sys.version_info.major == 3:
f = open(output_file_name, encoding='utf-8')
else:
f = open(output_file_name)
try:
return f.read().strip()
finally:
f.close()
finally:
pytesseract.cleanup(input_file_name)
pytesseract.cleanup(output_file_name)
|
Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
:param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
:param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
"best known" configuration at the moment.
"--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
to work better than the new LSTM-based one.
|
def sheets(self):
data = Dict()
for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]:
name = os.path.splitext(os.path.basename(src))[0]
xml = self.xml(src)
data[name] = xml
return data
|
return the sheets of data.
|
def acquire_code(args, session, session3):
serial_number = find_mfa_for_user(args.serial_number, session, session3)
if not serial_number:
print("There are no MFA devices associated with this user.",
file=sys.stderr)
return None, None, USER_RECOVERABLE_ERROR
token_code = args.token_code
if token_code is None:
while token_code is None or len(token_code) != 6:
token_code = getpass.getpass("MFA Token Code: ")
return serial_number, token_code, OK
|
returns the user's token serial number, MFA token code, and an
error code.
|
def data(self):
d = {}
for key in self._data:
if key == "plugins":
d[key] = self.plugins.data()
else:
try:
d[key] = getattr(self, key)
except AttributeError:
pass
return d
|
Returns the entire configuration as a dict.
Note that this will force all plugins to be loaded.
|
def read_config(*args):
ret = {}
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'get')
else:
cmd = _traffic_line('-r')
try:
for arg in args:
log.debug('Querying: %s', arg)
ret[arg] = _subprocess(cmd + [arg])
except KeyError:
pass
return ret
|
Read Traffic Server configuration variable definitions.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
|
def _create_genetic_expander(problem, mutation_chance):
def _expander(fringe, iteration, viewer):
fitness = [x.value for x in fringe]
sampler = InverseTransformSampler(fitness, fringe)
new_generation = []
expanded_nodes = []
expanded_neighbors = []
for _ in fringe:
node1 = sampler.sample()
node2 = sampler.sample()
child = problem.crossover(node1.state, node2.state)
action = 'crossover'
if random.random() < mutation_chance:
child = problem.mutate(child)
action += '+mutation'
child_node = SearchNodeValueOrdered(state=child, problem=problem, action=action)
new_generation.append(child_node)
expanded_nodes.append(node1)
expanded_neighbors.append([child_node])
expanded_nodes.append(node2)
expanded_neighbors.append([child_node])
if viewer:
viewer.event('expanded', expanded_nodes, expanded_neighbors)
fringe.clear()
for node in new_generation:
fringe.append(node)
return _expander
|
Creates an expander that expands the best nodes of the population,
crossing them over.
|
def generate_signature(method, version, endpoint,
date, rel_url, content_type, content,
access_key, secret_key, hash_type):
hostname = endpoint._val.netloc
if version >= 'v4.20181215':
content = b''
else:
if content_type.startswith('multipart/'):
content = b''
body_hash = hashlib.new(hash_type, content).hexdigest()
sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format(
method.upper(),
rel_url,
date.isoformat(),
hostname,
content_type.lower(),
version,
body_hash
)
sign_bytes = sign_str.encode()
sign_key = hmac.new(secret_key.encode(),
date.strftime('%Y%m%d').encode(), hash_type).digest()
sign_key = hmac.new(sign_key, hostname.encode(), hash_type).digest()
signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest()
headers = {
'Authorization': 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format(
hash_type.upper(),
access_key,
signature
),
}
return headers, signature
|
Generates the API request signature from the given parameters.
|
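A standalone illustration of the nested key-derivation chain used above, with made-up inputs (not the real Backend.AI parameters):

import hmac
import datetime

secret = b'my-secret-key'
date = datetime.date(2024, 1, 1)
sign_str = b'GET\n/config\n2024-01-01T00:00:00\nhost:api.example.com\n...'

# key = HMAC(HMAC(secret, YYYYMMDD), hostname); signature = HMAC(key, sign_str)
k1 = hmac.new(secret, date.strftime('%Y%m%d').encode(), 'sha256').digest()
k2 = hmac.new(k1, b'api.example.com', 'sha256').digest()
print(hmac.new(k2, sign_str, 'sha256').hexdigest()[:16])
|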
def Create(cls, discovery_doc,
scope_ls, client_id, client_secret, user_agent, names, api_key):
scopes = set(
discovery_doc.get('auth', {}).get('oauth2', {}).get('scopes', {}))
scopes.update(scope_ls)
package = discovery_doc['name']
url_version = discovery_doc['version']
base_url, base_path = _ComputePaths(package, url_version,
discovery_doc)
client_info = {
'package': package,
'version': NormalizeVersion(discovery_doc['version']),
'url_version': url_version,
'scopes': sorted(list(scopes)),
'client_id': client_id,
'client_secret': client_secret,
'user_agent': user_agent,
'api_key': api_key,
'base_url': base_url,
'base_path': base_path,
}
client_class_name = '%s%s' % (
names.ClassName(client_info['package']),
names.ClassName(client_info['version']))
client_info['client_class_name'] = client_class_name
return cls(**client_info)
|
Create a new ClientInfo object from a discovery document.
|
def fit_harmonic_oscillator(orbit, omega0=[1., 1, 1], minimize_kwargs=None):
omega0 = np.atleast_1d(omega0)
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(omega, w):
potential = HarmonicOscillatorPotential(omega=omega, units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = omega0
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
best_omega = np.abs(res.x)
return HarmonicOscillatorPotential(omega=best_omega, units=pot.units)
|
r"""
Fit the toy harmonic oscillator potential to the sum of the energy
residuals relative to the mean energy by minimizing the function
.. math::
f(\boldsymbol{\omega}) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm sho}(x_i\,|\,\boldsymbol{\omega}) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
omega0 : array_like (optional)
Initial frequency guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
omegas : float
Best-fit harmonic oscillator frequencies.
|
def create_title_page(self, filename, title=''):
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
fobj.write(self.snapshot_cls_header)
for classname in classlist:
data = snapshot.classes[classname].copy()
data['cls'] = '<a href="%s">%s</a>' % (self.relative_path(self.links[classname]), classname)
data['sum'] = pp(data['sum'])
data['avg'] = pp(data['avg'])
fobj.write(self.snapshot_cls % data)
fobj.write('</table>')
fobj.write('</td><td>\n')
if snapshot.tracked_total:
fobj.write(self.charts[snapshot])
fobj.write('</td></tr>\n')
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
|
Output the title page.
|
def dt_cluster(dt_list, dt_thresh=16.0):
if not isinstance(dt_list[0], float):
o_list = dt2o(dt_list)
else:
o_list = dt_list
o_list_sort = np.sort(o_list)
o_list_sort_idx = np.argsort(o_list)
d = np.diff(o_list_sort)
b = np.nonzero(d > dt_thresh)[0] + 1
b = np.hstack((0, b, d.shape[0] + 1))
f_list = []
for i in range(len(b)-1):
b_idx = [b[i], b[i+1]-1]
b_dt = o_list_sort[b_idx]
b_idx_orig = o_list_sort_idx[b_idx]
all_idx = np.arange(b_idx[0], b_idx[1])
all_sort = o_list_sort[all_idx]
all_idx_orig = o_list_sort_idx[all_idx]
clust = {}
clust['break_indices'] = b_idx_orig
clust['break_ts_o'] = b_dt
clust['break_ts_dt'] = o2dt(b_dt)
clust['all_indices'] = all_idx_orig
clust['all_ts_o'] = all_sort
clust['all_ts_dt'] = o2dt(all_sort)
f_list.append(clust)
return f_list
|
Find clusters of similar datetimes within datetime list
|
def longest_existing_path(_path):
existing_path = _path
while True:
_path_new = os.path.dirname(existing_path)
if exists(_path_new):
existing_path = _path_new
break
if _path_new == existing_path:
print('!!! [utool] This is a very illformated path indeed.')
existing_path = ''
break
existing_path = _path_new
return existing_path
|
r"""
Returns the longest root of _path that exists
Args:
_path (str): path string
Returns:
str: _path - path string
CommandLine:
python -m utool.util_path --exec-longest_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> target = dirname(ut.__file__)
>>> _path = join(target, 'nonexist/foobar')
>>> existing_path = longest_existing_path(_path)
>>> result = ('existing_path = %s' % (str(existing_path),))
>>> print(result)
>>> assert existing_path == target
|
def WaitUntilComplete(self, poll_freq=2, timeout=None):
start_time = time.time()
while len(self.requests):
cur_requests = []
for request in self.requests:
status = request.Status()
if status in ('notStarted','executing','resumed','queued','running'): cur_requests.append(request)
elif status == 'succeeded': self.success_requests.append(request)
elif status in ("failed", "unknown"): self.error_requests.append(request)
self.requests = cur_requests
if len(self.requests) > 0 and clc.v2.time_utils.TimeoutExpired(start_time, timeout):
raise clc.RequestTimeoutException('Timeout waiting for Requests: {0}'.format(self.requests[0].id),
self.requests[0].Status())
time.sleep(poll_freq)
return(len(self.error_requests))
|
Poll until all request objects have completed.
If status is 'notStarted' or 'executing' continue polling.
If status is 'succeeded' then success
Else log as error
poll_freq option is in seconds
Returns an Int: the number of unsuccessful requests. This behavior is subject to change.
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
0
|
def generate_getter(value):
@property
@wraps(is_)
def getter(self):
return self.is_(value)
return getter
|
Generate getter for given value.
|
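A standalone sketch of the same property-factory pattern (the `@wraps(is_)` in the original refers to an `is_` method in its enclosing class, omitted here; the names below are hypothetical):

class Color:
    def __init__(self, name):
        self._name = name

    def is_(self, value):
        return self._name == value

def make_getter(value):  # mirrors generate_getter above
    @property
    def getter(self):
        return self.is_(value)
    return getter

Color.red = make_getter('red')
print(Color('red').red)   # True
print(Color('blue').red)  # False
|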
def setup_logging(level, monochrome=False, log_file=None):
if log_file:
logging.basicConfig(filename=log_file, filemode='w',
level=logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = ColoredFormatter("%(levelname)s: %(message)s", monochrome)
ch.setFormatter(formatter)
packages = ('__main__', 'fusesoc',)
for package in packages:
logger = logging.getLogger(package)
logger.addHandler(ch)
logger.setLevel(level)
warning_only_packages = []
for package in warning_only_packages:
logger = logging.getLogger(package)
logger.addHandler(ch)
logger.setLevel(logging.WARNING)
logger.debug('Setup logging at level {}.'.format(level))
|
Utility function for setting up logging.
|
def get_parameter(self, index):
result = None
if index < len(self.paramorder):
key = self.paramorder[index]
if key in self._parameters:
result = self._parameters[key]
return result
|
Returns the ValueElement corresponding to the parameter
at the specified index.
|
def _send_tasks_and_stop_queuing(**kwargs):
log.info('Stopping queueing tasks and sending already queued ones.')
_stop_queuing_tasks()
task_queue = _get_task_queue()
while task_queue:
task, args, kwargs, extrakw = task_queue.pop(0)
task.original_apply_async(args=args, kwargs=kwargs, **extrakw)
|
Sends all delayed Celery tasks and stops queuing new ones for now.
|
def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs):
result = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs)
return result['_source']
|
Thin wrapper to get a single document by ID.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
|
def start_output (self):
super(CSVLogger, self).start_output()
row = []
if self.has_part("intro"):
self.write_intro()
self.flush()
else:
self.write(u"")
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=self.dialect,
delimiter=self.separator, lineterminator=self.linesep,
quotechar=self.quotechar)
for s in Columns:
if self.has_part(s):
row.append(s)
if row:
self.writerow(row)
|
Write checking start info as csv comment.
|
def get_terminal_size():
def read_terminal_size_by_ioctl(fd):
try:
import struct, fcntl, termios
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'0000'))  # use the fd argument instead of a hardcoded descriptor
except ImportError:
return None
except IOError as e:
return None
return cr[1], cr[0]
cr = read_terminal_size_by_ioctl(0) or \
read_terminal_size_by_ioctl(1) or \
read_terminal_size_by_ioctl(2)
if not cr:
try:
import os
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = read_terminal_size_by_ioctl(fd)
os.close(fd)
except:
pass
if not cr:
import os
cr = [80, 25]
if os.getenv('ROWS'):
cr[1] = int(os.getenv('ROWS'))
if os.getenv('COLUMNS'):
cr[0] = int(os.getenv('COLUMNS'))
return cr[1], cr[0]
|
Finds the width of the terminal, or returns a suitable default value.
|
def getspectrum(self, index):
mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
return mz_array, intensity_array
|
Reads the spectrum at specified index from the .ibd file.
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired mass
spectrum
intensity_array: numpy.ndarray
Sequence of intensity values corresponding to mz_array
|
def _start_machine(machine, session):
try:
return machine.launchVMProcess(session, '', '')
except Exception as e:
log.debug(e.message, exc_info=True)
return None
|
Helper to try and start machines
@param machine:
@type machine: IMachine
@param session:
@type session: ISession
@return:
@rtype: IProgress or None
|
def extract_source_params(src):
tags = get_taglist(src)
data = []
for key, param, vtype in BASE_PARAMS:
if key in src.attrib:
if vtype == "c":
data.append((param, src.attrib[key]))
elif vtype == "f":
data.append((param, float(src.attrib[key])))
else:
data.append((param, None))
elif key in tags:
if vtype == "c":
data.append((param, src.nodes[tags.index(key)].text))
elif vtype == "f":
data.append((param, float(src.nodes[tags.index(key)].text)))
else:
data.append((param, None))
else:
data.append((param, None))
return dict(data)
|
Extract params from source object.
|
def _calculate_fake_duration():
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - \
(datetime.datetime.utcnow() - datetime.datetime.now())
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = (utc_finish_time - utc_start_time)
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
|
Generate a NULL duration for when states do not run
but we want the results to be consistent.
|
def _handler_http(self, result):
monitor = result['monitor']
self.thread_debug("process_http", data=monitor, module='handler')
self.stats.http_handled += 1
logargs = {
'type':"metric",
'endpoint': result['url'],
'pipeline': monitor['pipeline'],
'service': monitor['service'],
'instance': monitor['instance'],
'status': result['status'],
'elapsed-ms': round(result['elapsedms'], 5),
'code': result['code']
}
self.NOTIFY(result['message'], **logargs)
if result['status'] != self.instances[monitor['instance']]['status']:
self.instances[monitor['instance']]['status'] = result['status']
self.rcs.patch('instance',
monitor['instance'],
{'status': result['status']})
|
Handle the result of an http monitor
|
def has_reset(self):
currentTime = self._read_as_int(Addr.Uptime, 4)
if currentTime <= self._ticks:
self._ticks = currentTime
return True
self._ticks = currentTime
return False
|
Checks the grizzly to see if it reset itself because of
voltage sag or other reasons. Useful to reinitialize acceleration or
current limiting.
|
def parse_config(config, env, as_dict=True):
if config is None:
return None
stripped = config.strip()
if len(stripped) == 0:
config = {}
elif stripped[0] == '{':
config = json.loads(config)
else:
config = yaml.load(config)
if as_dict:
config = dict(config)
replace_vars(config, env)
return config
|
Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
|
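A small usage sketch, assuming `parse_config` and its helper `replace_vars` from this module are in scope (with an empty env, no variable substitution should take place):

cfg = parse_config('{"bucket": "data", "retries": 3}', env={})
print(cfg)  # {'bucket': 'data', 'retries': 3}

cfg = parse_config('bucket: data\nretries: 3', env={})  # YAML works too
print(cfg['retries'])  # 3
|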
def _get_captcha(reddit_session, captcha_id):
url = urljoin(reddit_session.config['captcha'],
captcha_id + '.png')
sys.stdout.write('Captcha URL: {0}\nCaptcha: '.format(url))
sys.stdout.flush()
raw = sys.stdin.readline()
if not raw:
sys.stdin.close()
return None
return {'iden': captcha_id, 'captcha': raw.strip()}
|
Prompt user for captcha solution and return a prepared result.
|
def send_contact(self, chat_id, phone_number, first_name, last_name=None, vcard=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
assert_type_or_raise(phone_number, unicode_type, parameter_name="phone_number")
assert_type_or_raise(first_name, unicode_type, parameter_name="first_name")
assert_type_or_raise(last_name, None, unicode_type, parameter_name="last_name")
assert_type_or_raise(vcard, None, unicode_type, parameter_name="vcard")
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
assert_type_or_raise(reply_to_message_id, None, int, parameter_name="reply_to_message_id")
assert_type_or_raise(reply_markup, None, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply), parameter_name="reply_markup")
result = self.do("sendContact", chat_id=chat_id, phone_number=phone_number, first_name=first_name, last_name=last_name, vcard=vcard, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.updates import Message
try:
return Message.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type Message", exc_info=True)
raise TgApiParseException("Could not parse result.")
return result
|
Use this method to send phone contacts. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendcontact
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param phone_number: Contact's phone number
:type phone_number: str|unicode
:param first_name: Contact's first name
:type first_name: str|unicode
Optional keyword parameters:
:param last_name: Contact's last name
:type last_name: str|unicode
:param vcard: Additional data about the contact in the form of a vCard, 0-2048 bytes
:type vcard: str|unicode
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: int
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
Returns:
:return: On success, the sent Message is returned
:rtype: pytgbot.api_types.receivable.updates.Message
|
def reverse(self):
self.log(u"Reversing...")
all_length = self.all_length
self.__mfcc = self.__mfcc[:, ::-1]
tmp = self.__middle_end
self.__middle_end = all_length - self.__middle_begin
self.__middle_begin = all_length - tmp
if self.__mfcc_mask is not None:
self.__mfcc_mask = self.__mfcc_mask[::-1]
self.__mfcc_mask_map *= -1
self.__mfcc_mask_map += all_length - 1
self.__mfcc_mask_map = self.__mfcc_mask_map[::-1]
self.__speech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__speech_intervals[::-1]]
self.__nonspeech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__nonspeech_intervals[::-1]]
self.is_reversed = not self.is_reversed
self.log(u"Reversing...done")
|
Reverse the audio file.
The reversing is done efficiently using NumPy views inplace
instead of swapping values.
Only speech and nonspeech intervals are actually recomputed
as Python lists.
|