docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
---|---|---|
Serial call to read month tariffs block into meter object buffer.
Args:
months_type (int): A :class:`~ekmmeters.ReadMonths` value.
Returns:
bool: True on completion.
|
def readMonthTariffs(self, months_type):
    """Serial call to read month tariffs block into meter object buffer.

    Args:
        months_type (int): A :class:`~ekmmeters.ReadMonths` value.

    Returns:
        bool: True on completion and CRC match, False otherwise.
    """
    self.setContext("readMonthTariffs")
    try:
        req_type = binascii.hexlify(str(months_type).zfill(1))
        req_str = "01523102303031" + req_type + "282903"
        # Forward and reverse kWh months land in different buffer tables.
        work_table = self.m_mons
        if months_type == ReadMonths.kWhReverse:
            work_table = self.m_rev_mons
        self.request(False)
        # CRC is computed over the request frame minus the leading SOH byte.
        req_crc = self.calc_crc16(req_str[2:].decode("hex"))
        req_str += req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        raw_ret = self.m_serial_port.getResponse(self.getContext())
        self.serialPostEnd()
        unpacked_read = self.unpackStruct(raw_ret, work_table)
        self.convertData(unpacked_read, work_table, self.m_kwh_precision)
        # Response CRC excludes the first byte and the trailing two CRC bytes.
        return_crc = self.calc_crc16(raw_ret[1:-2])
        if str(return_crc) == str(work_table["crc16"][MeterData.StringValue]):
            ekm_log("Months CRC success, type = " + str(req_type))
            self.setContext("")
            return True
        # FIX: make the CRC-failure path explicit -- log it, clear the
        # context, and return a real bool instead of falling through.
        ekm_log("Months CRC failure, type = " + str(req_type))
        self.setContext("")
        return False
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return False
| 851,906 |
Read a single holiday date from meter buffer.
Args:
setting_holiday (int): Holiday from 0-19 or in range(Extents.Holidays)
Returns:
tuple: Holiday tuple, elements are strings.
=============== ======================
Holiday Holiday 0-19 as string
Day Day 1-31 as string
Month Month 1-12 as string
=============== ======================
|
def extractHolidayDate(self, setting_holiday):
    """Read a single holiday date from meter buffer.

    Args:
        setting_holiday (int): Holiday from 0-19 or in range(Extents.Holidays).

    Returns:
        namedtuple: fields Holiday, Month and Day, all strings.  All three
        fields are "0" when the index is out of bounds or the holiday is
        missing from the buffer.
    """
    Result = namedtuple("result", ["Holiday", "Month", "Day"])
    zeroed = Result(Holiday=str(0), Month=str(0), Day=str(0))
    # Meter registers are one based; callers pass a zero based index.
    setting_holiday += 1
    if (setting_holiday < 1) or (setting_holiday > Extents.Holidays):
        ekm_log("Out of bounds: holiday " + str(setting_holiday))
        return zeroed
    idxday = "Holiday_" + str(setting_holiday) + "_Day"
    idxmon = "Holiday_" + str(setting_holiday) + "_Mon"
    if (idxmon not in self.m_hldy) or (idxday not in self.m_hldy):
        return zeroed
    # FIX: the original assigned attributes onto the namedtuple *class*
    # and returned the class object; return a proper tuple instance, as
    # the docstring promises.
    return Result(Holiday=str(setting_holiday),
                  Month=self.m_hldy[idxmon][MeterData.StringValue],
                  Day=self.m_hldy[idxday][MeterData.StringValue])
| 851,909 |
Internal method to set the command result string.
Args:
msg (str): Message built during command.
|
def writeCmdMsg(self, msg):
    """Internal method to set the command result string.

    Logs the message tagged with the current serial context, then stores
    it on the meter object for later retrieval.

    Args:
        msg (str): Message built during command.
    """
    ekm_log("(writeCmdMsg | " + self.getContext() + ") " + msg)
    self.m_command_msg = msg
| 851,912 |
Password step of set commands
This method is normally called within another serial command, so it
does not issue a termination string. Any default password is set
in the caller parameter list, never here.
Args:
password_str (str): Required password.
Returns:
bool: True on completion and ACK.
|
def serialCmdPwdAuth(self, password_str):
    """Password step of set commands.

    This method is normally called within another serial command, so it
    does not issue a termination string.  Any default password is set
    in the caller parameter list, never here.

    Args:
        password_str (str): Required password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    try:
        # Frame bytes (hex): 01 'P' '1' 02 '(' <password> ')' 03.
        req_start = "0150310228" + binascii.hexlify(password_str) + "2903"
        # CRC is computed over the frame minus the leading SOH byte.
        req_crc = self.calc_crc16(req_start[2:].decode("hex"))
        req_str = req_start + req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        # Meter acknowledges with a single ACK byte (0x06) on success.
        if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
            ekm_log("Password accepted (" + self.getContext() + ")")
            result = True
        else:
            ekm_log("Password call failure no 06(" + self.getContext() + ")")
    except:
        ekm_log("Password call failure by exception(" + self.getContext() + ")")
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
| 851,913 |
Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read
|
def request(self, send_terminator = False):
    """Required request() override for v3 and standard method to read meter.

    Args:
        send_terminator (bool): Send termination string at end of read.

    Returns:
        bool: CRC request flag result from most recent read.
    """
    self.m_a_crc = False
    # Preserve and restore whatever context the caller was using.
    start_context = self.getContext()
    self.setContext("request[v3A]")
    try:
        # Wake/read request: "/?" + address + "!\r\n" (hex encoded framing).
        self.m_serial_port.write("2f3f".decode("hex") +
                                 self.m_meter_address +
                                 "210d0a".decode("hex"))
        self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())
        unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)
        self.convertData(unpacked_read_a, self.m_blk_a, 1)
        self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)
        if send_terminator:
            self.serialPostEnd()
        self.calculateFields()
        self.makeReturnFormat()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext(start_context)
    return self.m_a_crc
| 851,917 |
Insert to :class:`~ekmmeters.MeterDB` subclass.
Please note MeterDB subclassing is only for simplest-case.
Args:
meter_db (MeterDB): Instance of subclass of MeterDB.
|
def insert(self, meter_db):
    """Insert the latest read into a :class:`~ekmmeters.MeterDB` subclass.

    Please note MeterDB subclassing is only for simplest-case.

    Args:
        meter_db (MeterDB): Instance of subclass of MeterDB.
    """
    if not meter_db:
        ekm_log("Attempt to insert when no MeterDB assigned.")
        return
    meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)
| 851,919 |
Return :class:`~ekmmeters.Field` content, scaled and formatted.
Args:
fld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.
Returns:
str: String value (scaled if numeric) for the field.
|
def getField(self, fld_name):
    """Return :class:`~ekmmeters.Field` content, scaled and formatted.

    Args:
        fld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.

    Returns:
        str: String value (scaled if numeric) for the field, or "" when
        the field is not present in the read buffer.
    """
    if fld_name not in self.m_req:
        ekm_log("Requested nonexistent field: " + fld_name)
        return ""
    return self.m_req[fld_name][MeterData.StringValue]
| 851,921 |
Combined A and B read for V4 meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: True on completion.
|
def request(self, send_terminator = False):
    """Combined A and B read for V4 meter.

    Args:
        send_terminator (bool): Send termination string at end of read.

    Returns:
        bool: True on completion of both reads, False otherwise.
    """
    try:
        retA = self.requestA()
        retB = self.requestB()
        if retA and retB:
            self.makeAB()
            self.calculateFields()
            self.updateObservers()
            return True
        # FIX: return an explicit bool when either read fails, matching
        # the documented contract (rather than falling through).
        return False
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    return False
| 851,927 |
Single call wrapper for LCD set.
Wraps :func:`~ekmmeters.V4Meter.setLcd` and associated init and add methods.
Args:
display_list (list): List composed of :class:`~ekmmeters.LCDItems`
password (str): Optional password.
Returns:
bool: Passthrough from :func:`~ekmmeters.V4Meter.setLcd`
|
def setLCDCmd(self, display_list, password="00000000"):
    """Single call wrapper for LCD set.

    Wraps :func:`~ekmmeters.V4Meter.setLcd` and associated init and add methods.

    Args:
        display_list (list): List composed of :class:`~ekmmeters.LCDItems`.
        password (str): Optional password.

    Returns:
        bool: Passthrough from :func:`~ekmmeters.V4Meter.setLcd`.
    """
    result = False
    try:
        self.initLcd()
        item_cnt = len(display_list)
        # FIX: the original tested ``item_cnt > 45`` although both the
        # error message and the 40-slot LCD table in setLCD allow at
        # most 40 items.
        if (item_cnt > 40) or (item_cnt <= 0):
            ekm_log("LCD item list must have between 1 and 40 items")
            return False
        for display_item in display_list:
            self.addLcdItem(int(display_item))
        result = self.setLCD(password)
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
| 851,932 |
Serial call to set relay.
Args:
seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`.
relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
status (int): Status to set, see :class:`~ekmmeters.RelayState`
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
def setRelay(self, seconds, relay, status, password="00000000"):
    """Serial call to set relay.

    Args:
        seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`.
        relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
        status (int): Status to set, see :class:`~ekmmeters.RelayState`.
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setRelay")
    try:
        self.clearCmdMsg()
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return result
        if seconds < 0 or seconds > 9999:
            self.writeCmdMsg("Relay duration must be between 0 and 9999.")
            self.setContext("")
            return result
        if not self.requestA():
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Frame: SOH 'W' '1' STX "008" <relay> '(' <status><seconds> ')' ETX
                # (all hex encoded), followed by the CRC over everything past SOH.
                req_str = ""
                req_str = ("01573102303038" +
                           binascii.hexlify(str(relay)).zfill(2) +
                           "28" +
                           binascii.hexlify(str(status)).zfill(2) +
                           binascii.hexlify(str(seconds).zfill(4)) + "2903")
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                # Meter acknowledges with a single ACK byte (0x06).
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success: 06 returned.")
                    result = True
        self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
| 851,933 |
Serial call to set pulse input ratio on a line.
Args:
line_in (int): Member of :class:`~ekmmeters.Pulse`
new_cnst (int): New pulse input ratio
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
def setPulseInputRatio(self, line_in, new_cnst, password="00000000"):
    """Serial call to set pulse input ratio on a line.

    Args:
        line_in (int): Member of :class:`~ekmmeters.Pulse`.
        new_cnst (int): New pulse input ratio.
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setPulseInputRatio")
    try:
        # Consistency fix: validate the password length up front, as the
        # sibling set commands (setRelay, setLCD) already do.
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return result
        if not self.requestA():
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                req_const = binascii.hexlify(str(new_cnst).zfill(4))
                # Lines are one based in the API, zero based on the wire.
                line_const = binascii.hexlify(str(line_in - 1))
                req_str = "01573102303041" + line_const + "28" + req_const + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success: 06 returned.")
                    result = True
        self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
| 851,935 |
Serial call to zero resettable kWh registers.
Args:
password (str): Optional password.
Returns:
bool: True on completion and ACK.
|
def setZeroResettableKWH(self, password="00000000"):
    """Serial call to zero resettable kWh registers.

    Args:
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setZeroResettableKWH")
    try:
        if not self.requestA():
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Fixed command frame for the D3 (zero resettable) register,
                # hex encoded; CRC covers everything after the SOH byte.
                req_str = "0157310230304433282903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                # Meter acknowledges with a single ACK byte (0x06).
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success: 06 returned.")
                    result = True
        self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
| 851,936 |
Serial call to set LCD using meter object buffer.
Used with :func:`~ekmmeters.V4Meter.addLcdItem`.
Args:
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
def setLCD(self, password="00000000"):
    """Serial call to set LCD using meter object buffer.

    Used with :func:`~ekmmeters.V4Meter.addLcdItem`.

    Args:
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setLCD")
    try:
        self.clearCmdMsg()
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return result
        if not self.request():
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # The meter expects exactly 40 LCD slots: the configured
                # items first, then zero-padding for the remainder.
                req_table = ""
                fill_len = 40 - len(self.m_lcd_items)
                for lcdid in self.m_lcd_items:
                    append_val = binascii.hexlify(str(lcdid).zfill(2))
                    req_table += append_val
                for i in range(0, fill_len):
                    append_val = binascii.hexlify(str(0).zfill(2))
                    req_table += append_val
                # D2 command frame wrapping the 40-entry table, hex encoded;
                # CRC covers everything after the SOH byte.
                req_str = "015731023030443228" + req_table + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                # Meter acknowledges with a single ACK byte (0x06).
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success: 06 returned.")
                    result = True
        self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
| 851,937 |
Returns a connection to a mongo-clusters.
Args:
label (string): the label of a cluster.
Returns:
A connection to the cluster labeled with label.
Raises:
AttributeError: there is no cluster with the given label in the
config
|
def get_cluster(self, label):
    """Return a connection to the mongo cluster tagged with *label*.

    Args:
        label (str): the label of a cluster from the configuration.

    Returns:
        A (lazily created) client connected to the labeled cluster.

    Raises:
        AttributeError: there is no cluster with the given label in the
            config.
    """
    match = next((c for c in self._clusters if c['label'] == label), None)
    if match is None:
        raise AttributeError('No such cluster %s.' % label)
    return self._get_connection(match)
| 852,070 |
Validate that the provided configuration is valid.
Each dictionary in the configuration list must have the following
mandatory entries :
{label: {host(string), port(int), dbpath(string|list of strings)}}
It can also contain 1 optional key:
{read_preference(string)}
Args:
config: the list of configurations provided at instantiation
Raises:
TypeError: a fault in the configurations is found
|
def _validate_config(config):
    """Validate that the provided configuration is valid.

    Each dictionary in the configuration list must have the following
    mandatory entries:
        {label: {host(string), port(int), dbpath(string|list of strings)}}
    It can also contain 1 optional key: {read_preference(string)}.

    Args:
        config: the list of configurations provided at instantiation.

    Raises:
        TypeError: a fault in the configurations is found.
    """
    if not isinstance(config, list):
        raise TypeError('Config must be a list')
    for config_dict in config:
        if not isinstance(config_dict, dict):
            raise TypeError('Config must be a list of dictionaries')
        # PORTABILITY FIX: ``dict.keys()[0]`` only works on Python 2;
        # materializing the keys grabs the label on both versions.
        label = list(config_dict)[0]
        cfg = config_dict[label]
        if not isinstance(cfg, dict):
            raise TypeError('Config structure is broken')
        if 'host' not in cfg:
            raise TypeError('Config entries must have a value for host')
        if not isinstance(cfg['host'], str) and not isinstance(cfg['host'], list):
            raise TypeError('Host must be a string or a list.')
        if 'port' not in cfg:
            raise TypeError('Config entries must have a value for port')
        if not isinstance(cfg['port'], int):
            raise TypeError('Port must be an int')
        if 'dbpath' not in cfg:
            raise TypeError('Config entries must have a value for dbpath')
        if not isinstance(cfg['dbpath'], str):
            if not isinstance(cfg['dbpath'], list):
                raise TypeError('Dbpath must either a string or a list of '
                                'strings')
            for dbpath in cfg['dbpath']:
                if not isinstance(dbpath, str):
                    raise TypeError('Dbpath must either a string or a list '
                                    'of strings')
        if ('read_preference' in cfg and
                not isinstance(cfg['read_preference'], str)):
            raise TypeError('Read_preference must be a string')
        if ('replicaSet' in cfg and
                not isinstance(cfg['replicaSet'], str)):
            raise TypeError('replicaSet must be a string')
| 852,071 |
Converts the dbpath to a regexp pattern.
Transforms dbpath from a string or an array of strings to a
regexp pattern which will be used to match database names.
Args:
dbpath: a string or an array containing the databases to be matched
from a cluster.
Returns:
A regexp pattern that will match any of the desired databases on
a cluster.
|
def _parse_dbpath(dbpath):
    """Convert *dbpath* into a regexp pattern matching database names.

    Transforms dbpath from a string or a list of strings into a regexp
    pattern which will be used to match database names.

    Args:
        dbpath: a string or a list naming the databases to be matched
            from a cluster.

    Returns:
        A regexp pattern that will match any of the desired databases on
        a cluster.
    """
    if isinstance(dbpath, list):
        # A list of alternatives collapses into one alternation pattern.
        dbpath = '|'.join(dbpath)
    if dbpath.endswith('$'):
        return dbpath
    # Anchor at end of string so that e.g. 'twit' will not match 'twitter'.
    return '(%s)$' % dbpath
| 852,073 |
Converts read_preference from string to pymongo.ReadPreference value.
Args:
read_preference: string containing the read_preference from the
config file
Returns:
A value from the pymongo.ReadPreference enum
Raises:
Exception: Invalid read preference
|
def _get_read_preference(read_preference):
    """Convert read_preference from string to pymongo.ReadPreference value.

    Args:
        read_preference: string containing the read_preference name from
            the config file.

    Returns:
        A value from the pymongo.ReadPreference enum.

    Raises:
        ValueError: invalid read preference name.
    """
    resolved = getattr(pymongo.ReadPreference, read_preference, None)
    if resolved is None:
        # BUG FIX: the original rebound ``read_preference`` to None before
        # raising, so the error always reported "Invalid read preference:
        # None" instead of the offending name.
        raise ValueError('Invalid read preference: %s' % read_preference)
    return resolved
| 852,074 |
Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout.
|
def set_timeout(self, network_timeout):
    """Set the timeout for existing and future Clients.

    Closes all current connections, which causes future operations to
    create new Clients with the network_timeout passed through the
    socketTimeoutMS optional parameter.

    Args:
        network_timeout: The new value in milliseconds for the timeout.
    """
    previous = self._network_timeout
    # Setting the same timeout twice is a no-op; keep connections alive.
    if network_timeout != previous:
        self._network_timeout = network_timeout
        self._disconnect()
| 852,075 |
Return a connection to a Cluster.
Return a MongoClient or a MongoReplicaSetClient for the given Cluster.
This is done in a lazy manner (if there is already a Client connected to
the Cluster, it is returned and no other Client is created).
Args:
cluster: A dict containing information about a cluster.
Returns:
A MongoClient or MongoReplicaSetClient instance connected to the
desired cluster
|
def _get_connection(self, cluster):
    """Return a connection to a Cluster, creating it lazily.

    If a Client is already attached to the cluster dict it is reused;
    otherwise a new one is created and cached on the dict.

    Args:
        cluster: A dict containing information about a cluster.

    Returns:
        A MongoClient or MongoReplicaSetClient instance connected to the
        desired cluster.
    """
    try:
        return cluster['connection']
    except KeyError:
        pass
    # w=1 because:
    # http://stackoverflow.com/questions/14798552/is-mongodb-2-x-write-concern-w-1-truly-equals-to-safe-true
    connection = self._connection_class(
        socketTimeoutMS=self._network_timeout,
        w=1,
        j=self.j,
        **cluster['params'])
    cluster['connection'] = connection
    return connection
| 852,077 |
Map a database name to the Cluster that holds the database.
Args:
dbname: A database name.
Returns:
A dict containing the information about the Cluster that holds the
database.
|
def _match_dbname(self, dbname):
    """Map a database name to the Cluster that holds the database.

    Args:
        dbname: A database name.

    Returns:
        The config dict of the first cluster whose pattern matches.

    Raises:
        Exception: no configured cluster pattern matches the name.
    """
    candidates = (c for c in self._clusters if re.match(c['pattern'], dbname))
    found = next(candidates, None)
    if found is None:
        raise Exception('No such database %s.' % dbname)
    return found
| 852,078 |
Add arguments to the parser for collection in app.args.
Args:
parser:
`argparse.ArgumentParser`. Parser.
Arguments added here are available on
self.args.
|
def add_arguments(cls, parser):
    """Add arguments to the parser for collection in app.args.

    Args:
        parser:
            `argparse.ArgumentParser`. Parser.
            Arguments added here are available on
            self.args.
    """
    flag_specs = [
        ('-c', '--create-missing-tasks', 'create_missing_tasks',
         "[sync] create asana tasks for issues without tasks"),
        ('-l', '--sync-labels', 'sync_labels',
         "[sync] sync labels and milestones for each issue"),
    ]
    for short_flag, long_flag, dest, help_text in flag_specs:
        parser.add_argument(
            short_flag, long_flag,
            action='store_true',
            dest=dest,
            help=help_text,
        )
| 852,423 |
Fetch trace ids by service and span name.
Gets "limit" number of entries from before the "end_ts".
Span name is optional.
Timestamps are in microseconds.
Parameters:
- service_name
- span_name
- end_ts
- limit
- order
|
def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
    """Fetch trace ids by service and span name.

    Gets "limit" number of entries from before the "end_ts"; span name is
    optional; timestamps are in microseconds.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order)
    return self.recv_getTraceIdsBySpanName()
| 852,461 |
Fetch trace ids by service name.
Gets "limit" number of entries from before the "end_ts".
Timestamps are in microseconds.
Parameters:
- service_name
- end_ts
- limit
- order
|
def getTraceIdsByServiceName(self, service_name, end_ts, limit, order):
    """Fetch trace ids by service name.

    Gets "limit" number of entries from before the "end_ts"; timestamps
    are in microseconds.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceIdsByServiceName(service_name, end_ts, limit, order)
    return self.recv_getTraceIdsByServiceName()
| 852,463 |
Fetch trace ids with a particular annotation.
Gets "limit" number of entries from before the "end_ts".
When requesting based on time based annotations only pass in the first parameter, "annotation" and leave out
the second "value". If looking for a key-value binary annotation provide both, "annotation" is then the
key in the key-value.
Timestamps are in microseconds.
Parameters:
- service_name
- annotation
- value
- end_ts
- limit
- order
|
def getTraceIdsByAnnotation(self, service_name, annotation, value, end_ts, limit, order):
    """Fetch trace ids with a particular annotation.

    Gets "limit" number of entries from before the "end_ts".  For
    time-based annotations pass only "annotation"; for key-value binary
    annotations pass both "annotation" (the key) and "value".
    Timestamps are in microseconds.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceIdsByAnnotation(service_name, annotation, value, end_ts, limit, order)
    return self.recv_getTraceIdsByAnnotation()
| 852,465 |
Get the full traces associated with the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Parameters:
- trace_ids
- adjust
|
def getTracesByIds(self, trace_ids, adjust):
    """Get the full traces associated with the given trace ids.

    ``adjust`` is a (possibly empty) list of methods for adjusting the
    trace data before returning it.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTracesByIds(trace_ids, adjust)
    return self.recv_getTracesByIds()
| 852,468 |
Get the trace timelines associated with the given trace ids.
This is a convenience method for users that just want to know
the annotations and the (assumed) order they happened in.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Note that if one of the trace ids does not have any data associated with it, it will not be
represented in the output list.
Parameters:
- trace_ids
- adjust
|
def getTraceTimelinesByIds(self, trace_ids, adjust):
    """Get the trace timelines associated with the given trace ids.

    Convenience call returning only annotations in their (assumed)
    order.  ``adjust`` is a (possibly empty) list of trace adjusters.
    Trace ids with no data are omitted from the output list.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceTimelinesByIds(trace_ids, adjust)
    return self.recv_getTraceTimelinesByIds()
| 852,470 |
Fetch trace summaries for the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Note that if one of the trace ids does not have any data associated with it, it will not be
represented in the output list.
Parameters:
- trace_ids
- adjust
|
def getTraceSummariesByIds(self, trace_ids, adjust):
    """Fetch trace summaries for the given trace ids.

    ``adjust`` is a (possibly empty) list of trace adjusters.  Trace ids
    with no data are omitted from the output list.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceSummariesByIds(trace_ids, adjust)
    return self.recv_getTraceSummariesByIds()
| 852,472 |
Not content with just one of traces, summaries or timelines? Want it all? This is the method for you.
Parameters:
- trace_ids
- adjust
|
def getTraceCombosByIds(self, trace_ids, adjust):
    """Fetch traces, summaries and timelines combined for the trace ids.

    ``adjust`` is a (possibly empty) list of trace adjusters.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getTraceCombosByIds(trace_ids, adjust)
    return self.recv_getTraceCombosByIds()
| 852,474 |
Change the TTL of a trace. If we find an interesting trace we want to keep around for further
investigation.
Parameters:
- trace_id
- ttl_seconds
|
def setTraceTimeToLive(self, trace_id, ttl_seconds):
    """Change the TTL of a trace, e.g. to keep an interesting trace
    around for further investigation.

    Void call; recv is still awaited so server-side errors surface here.
    """
    self.send_setTraceTimeToLive(trace_id, ttl_seconds)
    self.recv_setTraceTimeToLive()
| 852,479 |
Get an aggregate representation of all services paired with every service they call in to.
This includes information on call counts and mean/stdDev/etc of call durations. The two arguments
specify epoch time in microseconds. The end time is optional and defaults to one day after the
start time.
Parameters:
- start_time
- end_time
|
def getDependencies(self, start_time, end_time):
    """Get an aggregate of all services paired with every service they
    call in to, including call counts and call duration statistics.

    Both arguments are epoch times in microseconds; end_time is optional
    per the service contract.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getDependencies(start_time, end_time)
    return self.recv_getDependencies()
| 852,484 |
Given a time stamp, server service name, and rpc name, fetch all of the client services calling in paired
with the lists of every trace Ids (list<i64>) from the server to client.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<trace_id>.
Parameters:
- time_stamp
- service_name
- rpc_name
|
def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
    """Fetch all client services calling in to the given server
    service/rpc, mapped to the lists of trace ids from server to client.

    ``time_stamp`` is an epoch time in microseconds; the returned map is
    keyed by client_service_name with list<trace_id> values.
    """
    # Thrift RPC stub: send the request, then block on the matching reply.
    self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
    return self.recv_getServiceNamesToTraceIds()
| 852,491 |
Aggregates methods
Parameters:
- service_name
- annotations
|
def storeTopAnnotations(self, service_name, annotations):
    """Store the top annotations for a service (aggregates method).

    Void call; recv is still awaited so server-side errors surface here.
    """
    self.send_storeTopAnnotations(service_name, annotations)
    self.recv_storeTopAnnotations()
| 852,974 |
combine a jinja template with a secret .ini file
Args:
config_path (str): path to .cfg file with jinja templating
secret_path (str): path to .ini-like secrets file
Returns:
ProsperConfig: rendered configuration object
|
def render_secrets(
        config_path,
        secret_path,
):
    """Combine a jinja template with a secret .ini file.

    Args:
        config_path (str): path to .cfg file with jinja templating.
        secret_path (str): path to .ini-like secrets file.

    Returns:
        ProsperConfig: rendered configuration object.
    """
    with open(secret_path, 'r') as s_fh:
        secret_ini = anyconfig.load(s_fh, ac_parser='ini')
    with open(config_path, 'r') as c_fh:
        raw_cfg = c_fh.read()
    # Fill the jinja2 placeholders in the raw config with the secret values.
    rendered_cfg = anytemplate.renders(raw_cfg, secret_ini, at_engine='jinja2')
    # Build the config object from the on-disk file, then swap in the
    # rendered text as its local overlay so secrets take precedence.
    p_config = ProsperConfig(config_path)
    local_config = configparser.ConfigParser()
    # preserve key case (configparser lowercases option names by default)
    local_config.optionxform = str
    local_config.read_string(rendered_cfg)
    p_config.local_config = local_config
    return p_config
| 854,120 |
check environment for key/value pair
Args:
section_name (str): section name
key_name (str): key to look up
envname_pad (str): namespace padding
logger (:obj:`logging.logger`): logging handle
Returns:
str: value in environment
|
def get_value_from_environment(
        section_name,
        key_name,
        envname_pad=ENVNAME_PAD,
        logger=logging.getLogger('ProsperCommon'),
):
    """Check environment for key/value pair.

    Args:
        section_name (str): section name.
        key_name (str): key to look up.
        envname_pad (str): namespace padding.
        logger (:obj:`logging.logger`): logging handle.

    Returns:
        str: value in environment (None when unset).
    """
    # Environment variables follow the PAD_SECTION__KEY convention.
    env_key = '{pad}_{section}__{key}'.format(
        pad=envname_pad,
        section=section_name,
        key=key_name,
    )
    logger.debug('var_name=%s', env_key)
    env_value = getenv(env_key)
    logger.debug('env value=%s', env_value)
    return env_value
| 854,122 |
go and fetch the global/local configs from file and load them with configparser
Args:
config_filepath (str): path to config
local_filepath_override (str): secondary place to locate config file
Returns:
ConfigParser: global_config
ConfigParser: local_config
|
def get_configs(
        config_filepath,
        local_filepath_override='',
):
    """Fetch the global/local configs from file and load them with configparser.

    Args:
        config_filepath (str): path to config.
        local_filepath_override (str): secondary place to locate config file.

    Returns:
        ConfigParser: global_config
        ConfigParser: local_config
    """
    global_config = read_config(config_filepath)
    # An explicit override wins; otherwise derive the "_local.cfg" path.
    local_filepath = local_filepath_override or \
        get_local_config_filepath(config_filepath, True)
    local_config = read_config(local_filepath)
    return global_config, local_config
| 854,123 |
fetch and parse config file
Args:
config_filepath (str): path to config file. abspath > relpath
logger (:obj:`logging.Logger`): logger to catch error msgs
|
def read_config(
        config_filepath,
        logger=logging.getLogger('ProsperCommon'),
):
    """Fetch and parse config file.

    Args:
        config_filepath (str): path to config file. abspath > relpath.
        logger (:obj:`logging.Logger`): logger to catch error msgs.

    Returns:
        configparser.ConfigParser: the parsed configuration.
    """
    logger.debug('config_filepath=%s', config_filepath)
    parser = configparser.ConfigParser(
        interpolation=ExtendedInterpolation(),
        allow_no_value=True,
        delimiters=('='),
        inline_comment_prefixes=('#')
    )
    with open(config_filepath, 'r') as filehandle:
        parser.read_file(filehandle)
    return parser
| 854,124 |
helper for finding local filepath for config
Args:
config_filepath (str): path to local config abspath > relpath
force_local (bool): force return of _local.cfg version
Returns:
str: Path to local config, or global if path DNE
|
def get_local_config_filepath(
        config_filepath,
        force_local=False,
):
    """Helper for finding the local filepath for a config.

    Args:
        config_filepath (str): path to local config abspath > relpath.
        force_local (bool): force return of the _local.cfg version.

    Returns:
        str: Path to local config, or global if the local path DNE.
    """
    base_name = path.basename(config_filepath).split('.')[0]
    local_filepath = path.join(
        path.split(config_filepath)[0],
        base_name + '_local.cfg',
    )
    # Prefer the _local.cfg override when it exists (or when demanded).
    if force_local or path.isfile(local_filepath):
        return local_filepath
    # Otherwise fall back to the tracked default.
    return config_filepath
| 854,125 |
Replicate configparser.get() functionality
Args:
section_name (str): section name in config
key_name (str): key name in config.section_name
Returns:
str: do not check defaults, only return local value
Raises:
KeyError: unable to find option in either local or global config
|
def get(
        self,
        section_name,
        key_name,
):
    """Replicate configparser.get() functionality: local first, then global.

    Args:
        section_name (str): section name in config.
        key_name (str): key name in config.section_name.

    Returns:
        str: the local value when present, otherwise the global value.

    Raises:
        KeyError: unable to find option in either local or global config.
    """
    try:
        return self.local_config.get(section_name, key_name)
    except Exception:
        self.logger.warning(
            '%s.%s not found in local config', section_name, key_name
        )
    try:
        return self.global_config.get(section_name, key_name)
    except Exception:
        self.logger.error(
            '%s.%s not found in global config', section_name, key_name
        )
        raise KeyError('Could not find option in local/global config')
| 854,127 |
Split an image into a specified number of tiles.
Args:
img (ndarray): The image to split.
number_tiles (int): The number of tiles required.
Returns:
Tuple of tiles
|
def split_to_tiles(img, columns, rows):
    """Split a 2-D image array into a grid of tiles.

    Args:
        img (ndarray): 2-D array; axis 0 is rows (height), axis 1 is
            columns (width), per numpy convention.
        columns (int): number of tile columns.
        rows (int): number of tile rows.

    Returns:
        tuple: tiles in row-major order, each of shape
        (floor(H / rows), floor(W / columns)).
    """
    # BUG FIX: ndarray.shape is (height, width); the original unpacked it
    # as (width, height), producing wrong tile sizes for non-square images.
    im_h, im_w = img.shape
    tile_w, tile_h = int(np.floor(im_w / columns)), int(np.floor(im_h / rows))
    tiles = []
    for pos_y in range(0, im_h - rows, tile_h):  # -rows for rounding error.
        for pos_x in range(0, im_w - columns, tile_w):  # as above.
            tiles.append(img[pos_y:pos_y + tile_h, pos_x:pos_x + tile_w])
    return tuple(tiles)
| 854,193 |
tries to resolve version number
Args:
here_path (str): path to project local dir
default_version (str): what version to return if all else fails
Returns:
str: semantic_version information for library
|
def get_version(
        here_path,
        default_version=DEFAULT_VERSION,
):
    """Tries to resolve version number.

    Args:
        here_path (str): path to project local dir.
        default_version (str): what version to return if all else fails.

    Returns:
        str: semantic_version information for library.
    """
    if 'site-packages' in here_path:
        # Running as dependency: use the version.txt shipped in the package.
        return _version_from_file(here_path)
    if os.environ.get('TRAVIS_TAG'):
        # Running on Travis-CI: trumps all
        if not TEST_MODE:  # pragma: no cover
            # strip the 'v' prefix from tags like 'v1.2.3'
            return os.environ.get('TRAVIS_TAG').replace('v', '')
        else:
            warnings.warn(
                'Travis detected, but TEST_MODE enabled',
                exceptions.ProsperVersionTestModeWarning)
    try:
        current_tag = _read_git_tags(default_version=default_version)
    except Exception:  # pragma: no cover
        # no usable git metadata -- fall back to the static file
        return _version_from_file(here_path)
    # TODO: if #steps from tag root, increment minor
    # TODO: check if off main branch and add name to prerelease
    with open(os.path.join(here_path, 'version.txt'), 'w') as v_fh:
        # save version info somewhere static
        v_fh.write(current_tag)
    return current_tag
| 854,266 |
tries to find current git tag
Notes:
git_command exposed for testing null case
Args:
default_version (str): what version to make
git_command (:obj:`list`): subprocess command
Returns:
str: latest version found, or default
Warns:
exceptions.ProsperDefaultVersionWarning: git version not found
|
def _read_git_tags(
        default_version=DEFAULT_VERSION,
        git_command=('git', 'tag'),
):
    """Tries to find current git tag.

    Notes:
        git_command exposed for testing null case.

    Args:
        default_version (str): what version to make.
        git_command (:obj:`list`): subprocess command.

    Returns:
        str: latest version found, or default.

    Warns:
        exceptions.ProsperDefaultVersionWarning: git version not found.
    """
    try:
        current_tags = check_output(git_command).splitlines()
    except Exception:  # pragma: no cover
        raise
    # BUG FIX: a repository with no tags yields an empty list, which made
    # the original ``current_tags[0]`` raise IndexError instead of
    # warning and returning the default.
    if not current_tags or not current_tags[0]:
        warnings.warn(
            'Unable to resolve current version',
            exceptions.ProsperDefaultVersionWarning)
        return default_version
    latest_version = semantic_version.Version(default_version)
    for tag in current_tags:
        tag_str = decode(tag, 'utf-8').replace('v', '')
        try:
            tag_ver = semantic_version.Version(tag_str)
        except Exception:  # pragma: no cover
            continue  # invalid tags ok, but no release
        if tag_ver > latest_version:
            latest_version = tag_ver
    return str(latest_version)
| 854,267 |
for PyPI installed versions, just get data from file
Args:
path_to_version (str): abspath to dir where version.txt exists
default_version (str): fallback version in case of error
Returns:
str: current working version
|
def _version_from_file(
        path_to_version,
        default_version=DEFAULT_VERSION,
):
    """For PyPI installed versions, just get data from file.

    Args:
        path_to_version (str): abspath to dir where version.txt exists.
        default_version (str): fallback version in case of error.

    Returns:
        str: current working version.
    """
    version_filepath = os.path.join(path_to_version, 'version.txt')
    if os.path.isfile(version_filepath):
        with open(version_filepath, 'r') as v_fh:
            return v_fh.read()
    # no version.txt shipped -- warn and fall back to the default
    warnings.warn(
        'Unable to resolve current version',
        exceptions.ProsperDefaultVersionWarning)
    return default_version
| 854,268 |
debug logger for stdout messages. Replacement for print()
Note:
Will try to overwrite minimum log level to enable requested log_level
Args:
log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
custom_args (str): special ID to include in messages
|
def configure_debug_logger(
        self,
        log_level='DEBUG',
        log_format=ReportingFormats.STDOUT.value,
        custom_args=''
):
    """Debug logger for stdout messages.  Replacement for print().

    Note:
        Will try to overwrite minimum log level to enable requested log_level.

    Args:
        log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
        log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
        custom_args (str): special ID to include in messages
    """
    # Delegate to the shared handler-wiring helper with a stdout handler.
    self._configure_common(
        'debug_',
        log_level,
        log_format,
        'Debug',
        logging.StreamHandler(),
        custom_args=custom_args
    )
| 854,273 |
Load object with webhook_url
Args:
webhook_url (str): full webhook url given by Discord 'create webhook' func
|
def webhook(self, webhook_url):
    """Load object with webhook_url.

    Args:
        webhook_url (str): full webhook url given by Discord 'create webhook' func.
    """
    if not webhook_url:
        raise Exception('Url can not be None')
    parsed = re.match(self.__webhook_url_format, webhook_url)
    if not parsed:
        raise Exception('Invalid url format, looking for: ' + self.__webhook_url_format)
    serverid, api_key = parsed.group(1), parsed.group(2)
    self.api_keys(int(serverid), api_key)
| 854,278 |
Load object with id/API pair
Args:
serverid (int): Discord 'guild' webhook is attached to
api_key (`str`:uuid): unique ID for webhook
|
def api_keys(self, serverid, api_key):
    """Load object with id/API pair.

    Args:
        serverid (int): Discord 'guild' webhook is attached to.
        api_key (`str`:uuid): unique ID for webhook.
    """
    if not (serverid and api_key):
        return
    self.can_query = True  # Yes, we _are_ (will be) configured
    self.serverid = int(serverid)
    self.api_key = api_key
    self.webhook_url = self.__base_url + str(self.serverid) + '/' + self.api_key
| 854,279 |
HackyDiscordHandler init
Args:
webhook_obj (:obj:`DiscordWebhook`): discord webhook has all the info for connection
alert_recipients (`str`:<@int>, optional): user/group to notify
|
def __init__(self, webhook_obj, alert_recipient=None):  # pragma: no cover
    """HackyDiscordHandler init.

    Args:
        webhook_obj (:obj:`DiscordWebhook`): discord webhook has all the info for connection.
        alert_recipient (`str`:<@int>, optional): user/group to notify.
    """
    logging.Handler.__init__(self)
    self.webhook_obj = webhook_obj
    if not self.webhook_obj:  # test if it's configured
        raise Exception('Webhook not configured.')
    self.api_url = webhook_obj.webhook_url
    self.alert_recipient = alert_recipient
    self.alert_length = len(alert_recipient) if alert_recipient else 0
| 854,280 |
separated Requests logic for easier testing
Args:
message (str): actual logging string to be passed to REST endpoint
Todo:
* Requests.text/json return for better testing options
|
def send_msg_to_webhook(self, message):
    """Separated Requests logic for easier testing.

    Args:
        message (str): actual logging string to be passed to REST endpoint.

    Todo:
        * Requests.text/json return for better testing options
    """
    try:
        response = requests.post(
            self.api_url,
            headers={'Content-Type': 'application/json'},
            json={'content': message}
        )
        response.raise_for_status()
    except Exception as error_msg:  # pragma: no cover
        # Emitting a log must never crash the caller; warn instead.
        warnings.warn(
            'EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' +
            '\n\texception={0}'.format(repr(error_msg)) +
            '\n\tmessage={0}'.format(message),
            exceptions.WebhookFailedEmitWarning
        )
| 854,282 |
add slack-specific flourishes to responses
https://api.slack.com/docs/message-attachments
Args:
record (:obj:`logging.record`): message to log
Returns:
(:obj:`dict`): attachments object for reporting
|
def decorate(self, record):
    """Add slack-specific flourishes to responses.

    https://api.slack.com/docs/message-attachments

    Args:
        record (:obj:`logging.record`): message to log.

    Returns:
        (:obj:`dict`): attachments object for reporting.
    """
    attachments = {}
    ## Set color: ERROR maps to 'warning', CRITICAL overrides to 'danger'
    if record.levelno >= logging.CRITICAL:
        attachments['color'] = 'danger'  #builtin
    elif record.levelno >= logging.ERROR:
        attachments['color'] = 'warning'  #builtin
    ## Log text
    summary = '{levelname}: {name} {module}.{funcName}:{lineno}'.format(
        levelname=record.levelname,
        name=record.name,
        module=record.module,
        funcName=record.funcName,
        lineno=record.lineno
    )
    attachments['text'] = summary
    attachments['fallback'] = summary
    return attachments
| 854,285 |
push message out to webhook
Args:
json_payload (:obj:`dict`): preformatted payload a la https://api.slack.com/docs/message-attachments
log_msg (str): actual log message
|
def send_msg_to_webhook(self, json_payload, log_msg):
    """Push a message and its attachment out to the Slack webhook.

    Args:
        json_payload (dict): preformatted attachment, per
            https://api.slack.com/docs/message-attachments
        log_msg (str): rendered log message text.
    """
    if SILENCE_OVERRIDE:  # pragma: no cover
        return
    try:
        response = requests.post(
            self.webhook_url,
            headers={'Content-Type': 'application/json'},
            json={'text': log_msg, 'attachments': [json_payload]},
        )
        response.raise_for_status()
    except Exception as err:  # pragma: no cover
        # never let a logging failure propagate into the host application
        warnings.warn(
            'EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' +
            '\n\texception={0}'.format(repr(err)) +
            '\n\tmessage={0}'.format(log_msg),
            exceptions.WebhookFailedEmitWarning
        )
| 854,286 |
Build up HipChat specific values for log record
Args:
record (:obj:`logging.record`): log message object
Returns:
dict: params for POST request
|
def decorate(self, record):
    """Build HipChat-specific POST parameters for a log record.

    Args:
        record (:obj:`logging.LogRecord`): log message object.

    Returns:
        dict: ``color``/``notify``/``message_format`` params for the request.
    """
    color = 'gray'
    if record.levelno == logging.WARNING:
        color = 'yellow'
    if record.levelno == logging.INFO:
        color = 'green'
    if record.levelno == logging.DEBUG:
        color = 'gray'
    if record.levelno >= logging.ERROR:
        color = 'red'
    # BUG FIX: original read `nofiy = True` (typo), so `notify` stayed
    # False even for ERROR/CRITICAL records.
    notify = False
    if record.levelno >= logging.ERROR:
        notify = True
    payload = {
        'color': color,
        'notify': notify,
        'message_format': 'text'
    }
    return payload
| 854,287 |
Collect information about accepted arguments in following format:
(
(<argument name>, <accepted types and values>),
(<argument name>, <accepted types and values>),
...
)
Args:
args_info (inspect.FullArgSpec): Information about function
arguments.
|
def __scan_func(self, args_info):
    """Collect information about accepted arguments.

    Populates ``self.accepted_args`` with ``(<argument name>, <accepted
    types and values>)`` tuples and records the indices of optional
    arguments in ``self.optional_args``.

    Args:
        args_info (inspect.FullArgSpec): Information about function
            arguments.
    """
    # Process args.
    for i, accepted_arg_vals in enumerate(self.accepted_arg_values):
        # Wrap each accepted value in the list if yet not wrapped.
        accepted_arg_vals = self.__wrap_accepted_val(accepted_arg_vals)
        # Add default value (if exists) in list of accepted values.
        if args_info.defaults:
            # def_range >= 0 exactly when the i-th positional argument
            # carries a default (defaults align with the tail of args).
            def_range = len(args_info.defaults) - len(args_info.args[i:])
            if def_range >= 0:
                self.optional_args.append(i)
                accepted_value = args_info.defaults[def_range]
                accepted_arg_vals.append(accepted_value)
        # Try to detect current argument name.
        if len(args_info.args) > i:
            arg_name = args_info.args[i]
        else:
            # More accepted-value specs than named args: no name to attach,
            # so mark the position as optional.
            arg_name = None
            self.optional_args.append(i)
        # Save info about current argument and his accepted values.
        self.accepted_args.append((arg_name, accepted_arg_vals))
    # Process kwargs.
    for arg_name, accepted_arg_vals in self.accepted_kwargs_values.items():
        # Wrap each accepted value in the list if yet not wrapped.
        accepted_arg_vals = self.__wrap_accepted_val(accepted_arg_vals)
        # Mark current argument as optional.
        i = len(self.accepted_args)
        self.optional_args.append(i)
        # Save info about current argument and his accepted values.
        self.accepted_args.append((arg_name, accepted_arg_vals))
| 854,311 |
Compare value of each required argument with list of
accepted values.
Args:
func_name (str): Function name.
args (list): Collection of the position arguments.
kwargs (dict): Collection of the keyword arguments.
Raises:
InvalidArgumentNumberError: When position or count of the arguments
is incorrect.
ArgumentValidationError: When encountered unexpected argument
value.
|
def __validate_args(self, func_name, args, kwargs):
    """Compare the value of each required argument with its accepted values.

    Args:
        func_name (str): Function name (used in error messages).
        args (list): Collection of the positional arguments.
        kwargs (dict): Collection of the keyword arguments.

    Raises:
        InvalidArgumentNumberError: When position or count of the arguments
            is incorrect.
        ArgumentValidationError: When an unexpected argument value is
            encountered.
    """
    from pyvalid.validators import Validator
    for i, (arg_name, accepted_values) in enumerate(self.accepted_args):
        # Resolve the actual value: positional first, then keyword;
        # a missing optional argument is simply skipped.
        if i < len(args):
            value = args[i]
        else:
            if arg_name in kwargs:
                value = kwargs[arg_name]
            elif i in self.optional_args:
                continue
            else:
                raise InvalidArgumentNumberError(func_name)
        is_valid = False
        for accepted_val in accepted_values:
            # A Validator may appear directly or wrapped as a bound method.
            is_validator = (
                isinstance(accepted_val, Validator) or
                (
                    isinstance(accepted_val, MethodType) and
                    hasattr(accepted_val, '__func__') and
                    isinstance(accepted_val.__func__, Validator)
                )
            )
            if is_validator:
                is_valid = accepted_val(value)
            elif isinstance(accepted_val, type):
                # A bare class means "any instance of this type" is accepted.
                is_valid = isinstance(value, accepted_val)
            else:
                # Otherwise require equality with the accepted literal.
                is_valid = value == accepted_val
            if is_valid:
                break
        if not is_valid:
            ord_num = self.__ordinal(i + 1)
            raise ArgumentValidationError(
                ord_num,
                func_name,
                value,
                accepted_values
            )
| 854,312 |
Calculates SHA256 digest of a file.
Args:
source: either a file-like object or a path to file
|
def file_digest(source):
    """Calculate the SHA-256 hex digest of a file.

    Args:
        source: either a binary file-like object (read in chunks) or a path
            to a file, which is opened — and always closed — by this function.

    Returns:
        str: hexadecimal SHA-256 digest of the file contents.
    """
    hash_sha256 = hashlib.sha256()
    should_close = False
    if isinstance(source, six.string_types):
        should_close = True
        source = open(source, 'rb')
    try:
        for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):
            hash_sha256.update(chunk)
    finally:
        # BUG FIX: previously the handle leaked if read() raised mid-stream
        if should_close:
            source.close()
    return hash_sha256.hexdigest()
| 854,665 |
Create the EcoNet API interface object.
Args:
email (str): EcoNet account email address.
password (str): EcoNet account password.
|
def __init__(self, email, password):
    """Create the EcoNet API interface object.

    Args:
        email (str): EcoNet account email address.
        password (str): EcoNet account password.
    """
    self.email = email
    self.password = password
    # Token state is filled in by authentication below.
    self.token = None
    self.refresh_token = None
    self.last_api_call = None
    self.state = []
    # Get a token immediately; records whether authentication succeeded.
    self.authenticated = self._authenticate()
| 854,813 |
Moves or creates the file with stream contents to a new location.
Args:
new_path: path to move to, if None a temporary file is created.
|
def save(self, new_path=None):
    """Move (or create) the stream contents at a new location.

    Args:
        new_path: destination path; when ``None``, a temporary file is
            created and used instead.
    """
    self.saved_in_temp = new_path is None
    if new_path is None:
        # Reserve a temp file; only the path is needed, so close the handle.
        handle, new_path = tempfile.mkstemp()
        os.close(handle)
    if self.current_path:
        shutil.move(self.current_path, new_path)
    else:
        with open(new_path, 'wb') as destination:
            _copy_stream(self._data, destination, self._size)
    self.current_path = new_path
| 855,133 |
logs launcher message before startup
Args:
log_level (str): level to notify at
|
def notify_launch(self, log_level='ERROR'):
    """Log a launcher message before startup.

    Args:
        log_level (str): logging level name to notify at.
    """
    if not self.debug:
        # High-level ping so ops sees the service (re)starting on this host.
        self.logger.log(
            logging.getLevelName(log_level),
            'LAUNCHING %s -- %s', self.PROGNAME, platform.node()
        )
    flask_options = {key: getattr(self, key) for key in OPTION_ARGS}
    flask_options['host'] = self.get_host()
    self.logger.info('OPTIONS: %s', flask_options)
| 855,225 |
Generate a basic error to include the current state.
A parser can supply only a representation of what it is expecting to
this method and the reader will provide the context, including the index
to the error.
Args:
expected: A representation of what the parser is currently expecting
Returns:
A full error message
|
def expected_error(self, expected: str) -> str:
    """Generate a basic error message that includes the current state.

    A parser supplies only a representation of what it is expecting; the
    reader provides the context, including the index.

    Args:
        expected: A representation of what the parser is currently expecting

    Returns:
        A full error message
    """
    if self.finished:
        return 'Expected {} but found end of source'.format(expected)
    return 'Expected {} but found {} at index {}'.format(
        expected, self.next_token(), self.position)
| 855,354 |
Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message
|
def recursion_error(self, repeated_parser: str):
    """Generate an error message for detected infinite recursion.

    A parser supplies a representation of itself; the reader supplies the
    context, including where the parser stalled.

    Args:
        repeated_parser: A representation of the repeated parser

    Returns:
        A full error message
    """
    base = ('Infinite recursion detected in {}; empty string was matched '
            'and will be matched forever').format(repeated_parser)
    if self.finished:
        return base + ' at end of source'
    return base + ' at index {} before {}'.format(self.position, self.next_token())
| 855,355 |
Generate a basic error to include the current state.
A parser can supply only a representation of what it is expecting to
this method and the reader will provide the context, including the line
and character positions.
Args:
expected: A representation of what the parser is currently expecting
Returns:
A full error message
|
def expected_error(self, expected: str) -> str:
    """Generate an error message with line/character context.

    A parser supplies only what it is expecting; the reader provides the
    line and character positions plus a pointer to the failing character.

    Args:
        expected: A representation of what the parser is currently expecting

    Returns:
        A full error message
    """
    if self.finished:
        return super().expected_error(expected)
    line_index, character_index, line, pointer = self.current_line()
    return 'Expected {} but found {}\nLine {}, character {}\n\n{}{}'.format(
        expected, repr(self.next_token()),
        line_index, character_index,
        line, pointer)
| 855,361 |
Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message
|
def recursion_error(self, repeated_parser: str):
    """Generate an infinite-recursion error with line/character context.

    A parser supplies a representation of itself; the reader supplies the
    location (line/character plus pointer) where the parser stalled.

    Args:
        repeated_parser: A representation of the repeated parser

    Returns:
        A full error message
    """
    if self.finished:
        return super().recursion_error(repeated_parser)
    line_index, character_index, line, pointer = self.current_line()
    return ('Infinite recursion detected in {}; empty string was matched and '
            'will be matched forever\n'
            'Line {}, character {}\n\n{}{}').format(
                repeated_parser, line_index, character_index, line, pointer)
| 855,362 |
Merge the failure message from another status into this one.
Whichever status represents parsing that has gone the farthest is
retained. If both statuses have gone the same distance, then the
expected values from both are retained.
Args:
status: The status to merge into this one.
Returns:
This ``Status`` which may have ``farthest`` and ``expected``
updated accordingly.
|
def merge(self, status: 'Status[Input, Output]') -> 'Status[Input, Output]':
    """Merge the failure message from another status into this one.

    Whichever status represents parsing that has gone the farthest is
    retained; on a tie, the expected values from both are kept.

    Args:
        status: The status to merge into this one.

    Returns:
        This ``Status``, with ``farthest`` and ``expected`` possibly updated.
    """
    if status is not None and status.farthest is not None:
        if self.farthest is None or status.farthest.position > self.farthest.position:
            # Incoming message went farther (or we had none): adopt it.
            self.farthest = status.farthest
            self.expected = status.expected
        elif status.farthest.position == self.farthest.position:
            # Equally far: keep both sets of expectations, incoming first.
            self.expected = status.expected + self.expected
        # Incoming message is not as far: keep the current message.
    return self
| 855,365 |
Produce a function that always returns a supplied value.
Args:
x: Any object.
Returns:
A function that accepts any number of positional and keyword arguments, discards them, and returns ``x``.
|
def constant(x: A) -> Callable[..., A]:
    """Produce a function that always returns a supplied value.

    Args:
        x: Any object.

    Returns:
        A function that accepts any positional and keyword arguments,
        discards them, and returns ``x``.
    """
    def constanted(*_args, **_kwargs):
        # arguments are intentionally ignored
        return x
    return constanted
| 855,373 |
Convert a function taking multiple arguments into a function taking a single iterable argument.
Args:
f: Any function
Returns:
A function that accepts a single iterable argument. Each element of this iterable argument is passed as an
argument to ``f``.
Example:
$ def f(a, b, c):
$ return a + b + c
$
$ f(1, 2, 3) # 6
$ g = splat(f)
$ g([1, 2, 3]) # 6
|
def splat(f: Callable[..., A]) -> Callable[[Iterable], A]:
    """Convert a multi-argument function into one taking a single iterable.

    Each element of the iterable argument is passed as a separate argument
    to ``f``.

    Example:
        >>> def add(a, b, c):
        ...     return a + b + c
        >>> splat(add)([1, 2, 3])
        6
    """
    def splatted(arg_iterable):
        return f(*arg_iterable)
    return splatted
| 855,374 |
Convert a function taking a single iterable argument into a function taking multiple arguments.
Args:
f: Any function taking a single iterable argument
Returns:
A function that accepts multiple arguments. Each argument of this function is passed as an element of an
iterable to ``f``.
Example:
$ def f(a):
$ return a[0] + a[1] + a[2]
$
$ f([1, 2, 3]) # 6
$ g = unsplat(f)
$ g(1, 2, 3) # 6
|
def unsplat(f: Callable[[Iterable], A]) -> Callable[..., A]:
    """Convert a function taking one iterable into one taking multiple args.

    Each argument of the returned function is collected into a tuple that
    is passed to ``f``.

    Example:
        >>> def total(a):
        ...     return a[0] + a[1] + a[2]
        >>> unsplat(total)(1, 2, 3)
        6
    """
    def unsplatted(*args):
        return f(args)
    return unsplatted
| 855,375 |
makes gunicorn.conf file for launching in docker
Notes:
https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
renders gunicorn.config (python) file in running dir
looks for GUNICORN_{option} in environment vars
Args:
_gunicorn_config_path (str): TEST HOOK, path to dump file
|
def make_gunicorn_config(
        _gunicorn_config_path='',
):
    """Write a ``gunicorn.conf`` file for launching inside docker.

    The rendered config copies every ``GUNICORN_{option}`` environment
    variable into a gunicorn setting, per
    https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/

    Args:
        _gunicorn_config_path (str): TEST HOOK, alternate path to dump file.
    """
    # NOTE(review): the template body was missing in the original source
    # (`gunicorn_py =` had no value — a syntax error). Reconstructed from the
    # blog pattern referenced above; confirm against deployment expectations.
    gunicorn_py = """\
import os

for key, value in os.environ.items():
    if key.startswith('GUNICORN_'):
        option = key.split('_', 1)[1].lower()
        locals()[option] = value
"""
    gunicorn_file = 'gunicorn.conf'
    if _gunicorn_config_path:
        gunicorn_file = _gunicorn_config_path
    with open(gunicorn_file, 'w') as gunicorn_cfg:
        gunicorn_cfg.write(gunicorn_py)
| 855,496 |
Execute an HTTP request to delete a message from queue.
Arguments:
message_id -- The ID of the message to be deleted.
reservation_id -- Reservation Id of the message. Reserved message could not be deleted without reservation Id.
subscriber_name -- This is required to acknowledge push after long-processing of message is finished.
|
def delete(self, message_id, reservation_id=None, subscriber_name=None):
    """Execute an HTTP request to delete a message from the queue.

    Args:
        message_id: The ID of the message to be deleted.
        reservation_id: Required to delete a reserved message.
        subscriber_name: Required to acknowledge a push subscriber after
            long-running processing of a message has finished.

    Returns:
        The response body from the API.
    """
    url = "queues/%s/messages/%s" % (self.name, message_id)
    payload = {}
    if reservation_id is not None:
        payload['reservation_id'] = reservation_id
    if subscriber_name is not None:
        payload['subscriber_name'] = subscriber_name
    response = self.client.delete(url=url, body=json.dumps(payload),
                                  headers={'Content-Type': 'application/json'})
    return response['body']
| 855,573 |
Execute an HTTP request to delete messages from queue.
Arguments:
ids -- A list of messages id to be deleted from the queue.
messages -- Response to message reserving.
|
def delete_multiple(self, ids=None, messages=None):
    """Execute an HTTP request to delete messages from the queue.

    Args:
        ids: A list of message ids to be deleted from the queue.
        messages: A reservation response whose messages should be deleted.

    Returns:
        The response body from the API.

    Raises:
        Exception: when neither ``ids`` nor ``messages`` is supplied.
    """
    url = "queues/%s/messages" % self.name
    items = None
    if ids is None and messages is None:
        raise Exception('Please, specify at least one parameter.')
    if ids is not None:
        items = [{'id': message_id} for message_id in ids]
    if messages is not None:
        # messages (when given) wins over ids, matching the original order
        items = [{'id': msg['id'], 'reservation_id': msg['reservation_id']}
                 for msg in messages['messages']]
    payload = json.dumps({'ids': items})
    response = self.client.delete(url=url, body=payload,
                                  headers={'Content-Type': 'application/json'})
    return response['body']
| 855,574 |
Executes an HTTP request to create message on the queue.
Creates queue if not existed.
Arguments:
messages -- An array of messages to be added to the queue.
|
def post(self, *messages):
    """Execute an HTTP request to create messages on the queue.

    Creates the queue if it does not exist.

    Args:
        messages: Messages to be added to the queue; plain strings are
            wrapped as ``{'body': <string>}``.

    Returns:
        The response body from the API.
    """
    url = "queues/%s/messages" % self.name
    wrapped = [{'body': message} if isinstance(message, basestring) else message
               for message in messages]
    payload = json.dumps({'messages': wrapped})
    response = self.client.post(url=url, body=payload,
                                headers={'Content-Type': 'application/json'})
    return response['body']
| 855,575 |
Retrieves Messages from the queue and reserves it.
Arguments:
max -- The maximum number of messages to reserve. Defaults to 1.
timeout -- Timeout in seconds.
wait -- Time to long poll for messages, in seconds. Max is 30 seconds. Default 0.
delete -- If true, do not put each message back on to the queue after reserving. Default false.
|
def reserve(self, max=None, timeout=None, wait=None, delete=None):
    """Retrieve messages from the queue and reserve them.

    Args:
        max: The maximum number of messages to reserve. Defaults to 1.
        timeout: Timeout in seconds.
        wait: Time to long-poll for messages, in seconds. Max 30, default 0.
        delete: If true, messages are not put back on the queue after
            reserving. Default false.

    Returns:
        The response body from the API.
    """
    url = "queues/%s/reservations" % self.name
    options = {}
    if max is not None:
        options['n'] = max
    if timeout is not None:
        options['timeout'] = timeout
    if wait is not None:
        options['wait'] = wait
    if delete is not None:
        options['delete'] = delete
    response = self.client.post(url, body=json.dumps(options),
                                headers={'Content-Type': 'application/json'})
    return response['body']
| 855,577 |
Touching a reserved message extends its timeout to the duration specified when the message was created.
Arguments:
message_id -- The ID of the message.
reservation_id -- Reservation Id of the message.
timeout -- Optional. The timeout in seconds after which new reservation will expire.
|
def touch(self, message_id, reservation_id, timeout=None):
    """Extend a reserved message's timeout.

    Touching a reserved message extends its timeout to the duration
    specified when the message was created.

    Args:
        message_id: The ID of the message.
        reservation_id: Reservation Id of the message.
        timeout: Optional. Seconds after which the new reservation expires.

    Returns:
        The response body from the API.
    """
    url = "queues/%s/messages/%s/touch" % (self.name, message_id)
    options = {'reservation_id': reservation_id}
    if timeout is not None:
        options['timeout'] = timeout
    response = self.client.post(url, body=json.dumps(options),
                                headers={'Content-Type': 'application/json'})
    return response['body']
| 855,580 |
Release locked message after specified time. If there is no message with such id on the queue.
Arguments:
message_id -- The ID of the message.
reservation_id -- Reservation Id of the message.
delay -- The time after which the message will be released.
|
def release(self, message_id, reservation_id, delay=0):
    """Release a reserved message back to the queue after ``delay`` seconds.

    Args:
        message_id: The ID of the message.
        reservation_id: Reservation Id of the message.
        delay: The time after which the message will be released.

    Returns:
        The response body from the API.
    """
    url = "queues/%s/messages/%s/release" % (self.name, message_id)
    options = {'reservation_id': reservation_id}
    if delay > 0:
        options['delay'] = delay
    response = self.client.post(url, body=json.dumps(options),
                                headers={'Content-Type': 'application/json'})
    return response['body']
| 855,581 |
find __version__ for making package
Args:
package_name (str): path to _version.py folder (abspath > relpath)
Returns:
str: __version__ value
|
def get_version(package_name):
    """Find ``__version__`` for building a package.

    Args:
        package_name (str): prosper subpackage containing ``_version.py``.

    Returns:
        str: the package's ``__version__`` value.
    """
    version_module = importlib.import_module(
        'prosper.' + package_name + '._version')
    return version_module.__version__
| 855,727 |
Consume reader and return Success only on complete consumption.
This is a helper function for ``parse`` methods, which return ``Success``
when the input is completely consumed and ``Failure`` with an appropriate
message otherwise.
Args:
parser: The parser doing the consuming
reader: The input being consumed
Returns:
A parsing ``Result``
|
def completely_parse_reader(parser: Parser[Input, Output], reader: Reader[Input]) -> Result[Output]:
    """Consume ``reader`` with ``parser``, succeeding only on full consumption.

    Helper for ``parse`` methods: returns ``Success`` when the input is
    completely consumed and ``Failure`` with an appropriate message otherwise.

    Args:
        parser: The parser doing the consuming
        reader: The input being consumed

    Returns:
        A parsing ``Result``
    """
    result = (parser << eof).consume(reader)
    if isinstance(result, Continue):
        return Success(result.value)
    # Deduplicate expectation messages while preserving first-seen order.
    seen = set()
    messages = []
    for expected_lambda in result.expected:
        message = expected_lambda()
        if message not in seen:
            seen.add(message)
            messages.append(message)
    return Failure(result.farthest.expected_error(' or '.join(messages)))
| 855,818 |
Optionally match a parser.
An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it
returns a list of length one with the value returned by the parser as the
only element. If it fails, it returns an empty list.
Args:
parser: Parser or literal
|
def opt(parser: Union[Parser, Sequence[Input]]) -> OptionalParser:
    """Optionally match a parser.

    Attempts ``parser``: on success, returns a one-element list holding the
    parser's value; on failure, returns an empty list.

    Args:
        parser: Parser or literal
    """
    wrapped = lit(parser) if isinstance(parser, str) else parser
    return OptionalParser(wrapped)
| 855,820 |
Match a parser one or more times repeatedly.
This matches ``parser`` multiple times in a row. If it matches as least
once, it returns a list of values from each time ``parser`` matched. If it
does not match ``parser`` at all, it fails.
Args:
parser: Parser or literal
|
def rep1(parser: Union[Parser, Sequence[Input]]) -> RepeatedOnceParser:
    """Match a parser one or more times repeatedly.

    Matches ``parser`` multiple times in a row, returning the list of
    matched values; fails if there is no match at all.

    Args:
        parser: Parser or literal
    """
    wrapped = lit(parser) if isinstance(parser, str) else parser
    return RepeatedOnceParser(wrapped)
| 855,821 |
Match a parser zero or more times repeatedly.
This matches ``parser`` multiple times in a row. A list is returned
containing the value from each match. If there are no matches, an empty list
is returned.
Args:
parser: Parser or literal
|
def rep(parser: Union[Parser, Sequence[Input]]) -> RepeatedParser:
    """Match a parser zero or more times repeatedly.

    Matches ``parser`` multiple times in a row, returning a list with the
    value from each match (empty when there are no matches).

    Args:
        parser: Parser or literal
    """
    wrapped = lit(parser) if isinstance(parser, str) else parser
    return RepeatedParser(wrapped)
| 855,822 |
Match a parser one or more times separated by another parser.
This matches repeated sequences of ``parser`` separated by ``separator``.
If there is at least one match, a list containing the values of the
``parser`` matches is returned. The values from ``separator`` are discarded.
If it does not match ``parser`` at all, it fails.
Args:
parser: Parser or literal
separator: Parser or literal
|
def rep1sep(parser: Union[Parser, Sequence[Input]],
            separator: Union[Parser, Sequence[Input]]) -> RepeatedOnceSeparatedParser:
    """Match a parser one or more times, separated by another parser.

    Matches repeated sequences of ``parser`` separated by ``separator``,
    returning the list of ``parser`` values; the ``separator`` values are
    discarded. Fails if there is no ``parser`` match at all.

    Args:
        parser: Parser or literal
        separator: Parser or literal
    """
    wrapped_parser = lit(parser) if isinstance(parser, str) else parser
    wrapped_separator = lit(separator) if isinstance(separator, str) else separator
    return RepeatedOnceSeparatedParser(wrapped_parser, wrapped_separator)
| 855,823 |
Match a parser zero or more times separated by another parser.
This matches repeated sequences of ``parser`` separated by ``separator``. A
list is returned containing the value from each match of ``parser``. The
values from ``separator`` are discarded. If there are no matches, an empty
list is returned.
Args:
parser: Parser or literal
separator: Parser or literal
|
def repsep(parser: Union[Parser, Sequence[Input]],
           separator: Union[Parser, Sequence[Input]]) -> RepeatedSeparatedParser:
    """Match a parser zero or more times, separated by another parser.

    Matches repeated sequences of ``parser`` separated by ``separator``,
    returning the list of ``parser`` values (empty when no matches); the
    ``separator`` values are discarded.

    Args:
        parser: Parser or literal
        separator: Parser or literal
    """
    wrapped_parser = lit(parser) if isinstance(parser, str) else parser
    wrapped_separator = lit(separator) if isinstance(separator, str) else separator
    return RepeatedSeparatedParser(wrapped_parser, wrapped_separator)
| 855,824 |
Retrieve the access, modify, and create timetags for a path along with its size
Arguments:
path (str): full path to the file or directory to be statused
status (dict): optional existing status to be updated/overwritten with new status values
Returns:
dict: {'size': bytes (int), 'accessed': (datetime), 'modified': (datetime), 'created': (datetime)}
|
def path_status(path, filename='', status=None, verbosity=0):
    """Retrieve access/modify/create timestamps, size, and type for a path.

    Args:
        path (str): full path to the file or directory to be statused, or
            its parent directory when ``filename`` is given.
        filename (str): optional file name inside ``path``.
        status (dict): optional existing status dict to update/overwrite.
        verbosity (int): >1 prints the path; >-1 warns on stat failure.

    Returns:
        dict: keys ``name``/``path``/``dir``/``type`` always, and when the
        path exists also ``size`` (bytes), ``accessed``/``modified``/
        ``created`` (datetime), and ``mode``.
    """
    import stat as stat_module  # local import: S_IS* predicates for mode bits

    status = status or {}
    if not filename:
        # BUG FIX: os.path.split() was called without its `path` argument
        dir_path, filename = os.path.split(path)
    else:
        dir_path = path
    full_path = os.path.join(dir_path, filename)
    if verbosity > 1:
        print(full_path)
    status['name'] = filename
    status['path'] = full_path
    status['dir'] = dir_path
    status['type'] = []
    try:
        status['size'] = os.path.getsize(full_path)
        status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path))
        status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))
        status['created'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path))
        # first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read
        status['mode'] = os.stat(full_path).st_mode
        if os.path.ismount(full_path):
            status['type'] += ['mount-point']
        elif os.path.islink(full_path):
            status['type'] += ['symlink']
        if os.path.isfile(full_path):
            status['type'] += ['file']
        elif os.path.isdir(full_path):
            status['type'] += ['dir']
        if not status['type']:
            # BUG FIX: these predicates live in the `stat` module;
            # `os.stat.S_ISSOCK(...)` raised AttributeError at runtime
            if stat_module.S_ISSOCK(status['mode']):
                status['type'] += ['socket']
            elif stat_module.S_ISCHR(status['mode']):
                status['type'] += ['special']
            elif stat_module.S_ISBLK(status['mode']):
                status['type'] += ['block-device']
            elif stat_module.S_ISFIFO(status['mode']):
                status['type'] += ['pipe']
        if not status['type']:
            status['type'] += ['unknown']
        elif status['type'] and status['type'][-1] == 'symlink':
            # a dangling symlink reaches here with no file/dir type appended
            status['type'] += ['broken']
    except OSError:
        status['type'] = ['nonexistent'] + status['type']
        if verbosity > -1:
            warnings.warn("Unable to stat path '{}'".format(full_path))
    status['type'] = '->'.join(status['type'])
    return status
| 856,745 |
Creates a content entity bucket with the given `contentid`.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
meta:
protected: Whether or not this is restricted to certain device serial numbers only.
|
def content_create(self, key, model, contentid, meta, protected=False):
    """Create a content entity bucket with the given ``contentid``.

    Maps to
    https://github.com/exosite/docs/tree/master/provision#post---create-content-entity

    Args:
        key: The CIK or Token for the device
        model: model name
        contentid: The ID used to name the entity bucket
        meta: metadata for the bucket
        protected: whether this is restricted to certain serial numbers only
    """
    params = {'id': contentid, 'meta': meta}
    if protected is not False:
        params['protected'] = 'true'
    path = PROVISION_MANAGE_CONTENT + model + '/'
    return self._request(path, key, urlencode(params), 'POST',
                         self._manage_by_cik)
| 856,923 |
(Speculation) Fetches content information for a given vendor, model, and ID as chunks.
This method might map to:
https://github.com/exosite/docs/tree/master/provision#get---get-content-blob-1,
but seems to be missing serial number.
Args:
cik: The CIK for the device
vendor: The name of the vendor
model:
contentid: The ID used to name the entity bucket
|
def content_download(self, cik, vendor, model, contentid):
    """(Speculation) Fetch content for a vendor/model/ID as chunks.

    Might map to
    https://github.com/exosite/docs/tree/master/provision#get---get-content-blob-1
    but appears to be missing the serial number.

    Args:
        cik: The CIK for the device
        vendor: The name of the vendor
        model: model name
        contentid: The ID used to name the entity bucket
    """
    query = urlencode({'vendor': vendor,
                       'model': model,
                       'id': contentid})
    return self._request(PROVISION_DOWNLOAD, cik, query, 'GET', True,
                         {"Accept": "*"})
| 856,924 |
(Speculation) Fetches content information for a given vendor, model, and ID.
This method might map to:
https://github.com/exosite/docs/tree/master/provision#get---get-content-info-1,
but seems to be missing serial number.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
vendor: The name of the vendor
|
def content_info(self, key, model, contentid, vendor=None):
    """(Speculation) Fetch content information for a model and ID.

    Might map to
    https://github.com/exosite/docs/tree/master/provision#get---get-content-info-1
    but appears to be missing the serial number.

    Args:
        key: The CIK or Token for the device
        model: model name
        contentid: The ID used to name the entity bucket
        vendor: The name of the vendor
    """
    if vendor:
        # with a vendor name, `key` may be the device key
        query = urlencode({'vendor': vendor,
                           'model': model,
                           'id': contentid,
                           'info': 'true'})
        return self._request(PROVISION_DOWNLOAD,
                             key, query, 'GET', self._manage_by_cik)
    # without a vendor name, `key` should be the owner key
    path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
    return self._request(path, key, '', 'GET', self._manage_by_cik)
| 856,925 |
Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
|
def content_list(self, key, model):
    """Return the list of content IDs for a given model.

    Maps to
    https://github.com/exosite/docs/tree/master/provision#get---list-content-ids

    Args:
        key: The CIK or Token for the device
        model: model name
    """
    return self._request(PROVISION_MANAGE_CONTENT + model + '/',
                         key, '', 'GET', self._manage_by_cik)
| 856,926 |
Deletes the information for the given contentid under the given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#delete---delete-content
Args:
key: The CIK or Token for the device
model:
|
def content_remove(self, key, model, contentid):
    """Delete the information for ``contentid`` under the given model.

    Maps to
    https://github.com/exosite/docs/tree/master/provision#delete---delete-content

    Args:
        key: The CIK or Token for the device
        model: model name
        contentid: The ID naming the entity bucket to delete
    """
    return self._request(PROVISION_MANAGE_CONTENT + model + '/' + contentid,
                         key, '', 'DELETE', self._manage_by_cik)
| 856,927 |
Store the given data as a result of a query for content id given the model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---upload-content
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
data: The data blob to save
mimetype: The Content-Type to use when serving the blob later
|
def content_upload(self, key, model, contentid, data, mimetype):
    """Store ``data`` as the content for ``contentid`` under the model.

    Maps to
    https://github.com/exosite/docs/tree/master/provision#post---upload-content

    Args:
        key: The CIK or Token for the device
        model: model name
        contentid: The ID used to name the entity bucket
        data: The data blob to save
        mimetype: The Content-Type to use when serving the blob later
    """
    path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
    return self._request(path, key, data, 'POST', self._manage_by_cik,
                         {"Content-Type": mimetype})
| 856,928 |
Given an activation code, activate an entity for the client specified in <ResourceID>.
Args:
auth: <cik>
codetype: Type of code being activated.
code: Code to activate.
|
def activate(self, auth, codetype, code, defer=False):
    """Activate an entity for the client identified by ``auth``.

    Args:
        auth: <cik>
        codetype: Type of code being activated.
        code: Code to activate.
        defer: batch the call instead of sending immediately.
    """
    arguments = [codetype, code]
    return self._call('activate', auth, arguments, defer)
| 857,241 |
Create something in Exosite.
Args:
auth: <cik>
type: What thing to create.
desc: Information about thing.
|
def create(self, auth, type, desc, defer=False):
    """Create something in Exosite.

    Args:
        auth: <cik>
        type: What thing to create.
        desc: Information about thing.
        defer: batch the call instead of sending immediately.
    """
    arguments = [type, desc]
    return self._call('create', auth, arguments, defer)
| 857,242 |
Deletes the specified resource.
Args:
auth: <cik>
resource: <ResourceID>
|
def drop(self, auth, resource, defer=False):
    """Delete the specified resource.

    Args:
        auth: <cik>
        resource: <ResourceID>
        defer: batch the call instead of sending immediately.
    """
    arguments = [resource]
    return self._call('drop', auth, arguments, defer)
| 857,245 |
Empties the specified resource of data per specified constraints.
Args:
auth: <cik>
resource: resource to empty.
options: Time limits.
|
def flush(self, auth, resource, options=None, defer=False):
    """Empty the specified resource of data per the given constraints.

    Args:
        auth: <cik>
        resource: resource to empty.
        options: Time limits.
        defer: batch the call instead of sending immediately.
    """
    arguments = [resource]
    if options is not None:
        arguments.append(options)
    return self._call('flush', auth, arguments, defer)
| 857,246 |
Grant resources with specific permissions and return a token.
Args:
auth: <cik>
resource: Alias or ID of resource.
permissions: permissions of resources.
ttl: Time To Live.
|
def grant(self, auth, resource, permissions, ttl=None, defer=False):
    """Grant permissions on a resource and return a token.

    Args:
        auth: <cik>
        resource: Alias or ID of resource.
        permissions: permissions of resources.
        ttl: Time To Live.
        defer: batch the call instead of sending immediately.
    """
    arguments = [resource, permissions]
    if ttl is not None:
        arguments.append({"ttl": ttl})
    return self._call('grant', auth, arguments, defer)
| 857,247 |
Request creation and usage information of specified resource according to the specified
options.
Args:
auth: <cik>
resource: Alias or ID of resource
options: Options to define what info you would like returned.
|
def info(self, auth, resource, options=None, defer=False):
    """Request creation and usage information of the specified resource.

    Args:
        auth: <cik>
        resource: Alias or ID of resource
        options: Options defining what info to return (default: all).
        defer: batch the call instead of sending immediately.
    """
    # avoid the shared mutable-default-argument pitfall: build {} per call
    if options is None:
        options = {}
    return self._call('info', auth, [resource, options], defer)
| 857,248 |
Look up a Resource ID by alias, owned Resource ID, or share activation code under the
client specified in <ClientID>.
Args:
auth: <cik>
type: Type of resource to lookup (alias | owner | shared)
mapping: Based on resource type defined above.
|
def lookup(self, auth, type, mapping, defer=False):
    """Look up a Resource ID by alias, owned Resource ID, or share code.

    Args:
        auth: <cik>
        type: Type of resource to lookup (alias | owner | shared)
        mapping: Lookup key, interpreted per the resource type above.
        defer: batch the call instead of sending immediately.
    """
    arguments = [type, mapping]
    return self._call('lookup', auth, arguments, defer)
| 857,250 |
Creates an alias for a resource.
Args:
auth: <cik>
resource: <ResourceID>
alias: alias to create (map)
|
def map(self, auth, resource, alias, defer=False):
    """Create an alias for a resource.

    Args:
        auth: <cik>
        resource: <ResourceID>
        alias: alias to create (map)
        defer: batch the call instead of sending immediately.
    """
    arguments = ['alias', resource, alias]
    return self._call('map', auth, arguments, defer)
| 857,251 |
Moves a resource from one parent client to another.
Args:
auth: <cik>
resource: Identified resource to be moved.
destinationresource: resource of client resource is being moved to.
|
def move(self, auth, resource, destinationresource, options=None, defer=False):
    """Move a resource from one parent client to another.

    Args:
        auth: <cik>
        resource: Identified resource to be moved.
        destinationresource: client resource the resource is being moved to.
        options: Move options; defaults to ``{"aliases": True}``.
        defer: batch the call instead of sending immediately.
    """
    # avoid the shared mutable-default-argument pitfall: build dict per call
    if options is None:
        options = {"aliases": True}
    return self._call('move', auth, [resource, destinationresource, options], defer)
| 857,252 |
Read value(s) from a dataport.
Calls a function that builds a request to read the dataport specified by an alias or rid
and returns timeseries data as defined by the options.
Args:
auth: Takes the device cik
resource: Takes the dataport alias or rid.
options: Takes a list of options for what to return.
|
def read(self, auth, resource, options, defer=False):
    """Read timeseries value(s) from a dataport.

    Args:
        auth: Takes the device cik
        resource: Takes the dataport alias or rid.
        options: Takes a list of options for what to return.
        defer: batch the call instead of sending immediately.
    """
    arguments = [resource, options]
    return self._call('read', auth, arguments, defer)
| 857,253 |
Records a list of historical entries to the resource specified.
Note: This API is deprecated, use recordbatch instead.
Calls a function that bulids a request that writes a list of historical entries to the
specified resource.
Args:
auth: Takes the device cik
resource: Takes the dataport alias or rid.
entries: A list of entries to write to the resource.
options: Currently unused.
|
def record(self, auth, resource, entries, options=None, defer=False):
    """Record a list of historical entries to the specified resource.

    Note: This API is deprecated, use ``recordbatch`` instead.

    Args:
        auth: Takes the device cik
        resource: Takes the dataport alias or rid.
        entries: A list of entries to write to the resource.
        options: Currently unused; defaults to ``{}``.
        defer: batch the call instead of sending immediately.
    """
    # avoid the shared mutable-default-argument pitfall: build {} per call
    if options is None:
        options = {}
    return self._call('record', auth, [resource, entries, options], defer)
| 857,254 |
Records a list of historical entries to the resource specified.
Calls a function that bulids a request that writes a list of historical entries to the
specified resource.
Args:
auth: Takes the device cik
resource: Takes the dataport alias or rid.
entries: A list of entries to write to the resource.
|
def recordbatch(self, auth, resource, entries, defer=False):
    """Record a list of historical entries to the specified resource.

    Args:
        auth: Takes the device cik
        resource: Takes the dataport alias or rid.
        entries: A list of entries to write to the resource.
        defer: batch the call instead of sending immediately.
    """
    arguments = [resource, entries]
    return self._call('recordbatch', auth, arguments, defer)
| 857,255 |
Given an activation code, the associated entity is revoked after which the activation
code can no longer be used.
Args:
auth: Takes the owner's cik
codetype: The type of code to revoke (client | share)
code: Code specified by <codetype> (cik | share-activation-code)
|
def revoke(self, auth, codetype, code, defer=False):
    """Revoke the entity tied to an activation code.

    After revocation the activation code can no longer be used.

    Args:
        auth: Takes the owner's cik
        codetype: The type of code to revoke (client | share)
        code: Code specified by <codetype> (cik | share-activation-code)
        defer: batch the call instead of sending immediately.
    """
    arguments = [codetype, code]
    return self._call('revoke', auth, arguments, defer)
| 857,256 |
Generates a share code for the given resource.
Args:
auth: <cik>
resource: The identifier of the resource.
options: Dictionary of options.
|
def share(self, auth, resource, options=None, defer=False):
    """Generate a share code for the given resource.

    Args:
        auth: <cik>
        resource: The identifier of the resource.
        options: Dictionary of options; defaults to ``{}``.
        defer: batch the call instead of sending immediately.
    """
    # avoid the shared mutable-default-argument pitfall: build {} per call
    if options is None:
        options = {}
    return self._call('share', auth, [resource, options], defer)
| 857,257 |
Updates the description of the resource.
Args:
auth: <cik> for authentication
resource: Resource to be updated
desc: A Dictionary containing the update for the resource.
|
def update(self, auth, resource, desc=None, defer=False):
    """Update the description of the resource.

    Args:
        auth: <cik> for authentication
        resource: Resource to be updated
        desc: A dictionary containing the update; defaults to ``{}``.
        defer: batch the call instead of sending immediately.
    """
    # avoid the shared mutable-default-argument pitfall: build {} per call
    if desc is None:
        desc = {}
    return self._call('update', auth, [resource, desc], defer)
| 857,260 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.