text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Returns true if field with field_name exists.
<END_TASK>
<USER_TASK:>
Description:
def hasField(self, name):
    """Returns true if field with field_name exists.
    @param name: Field Name
    @return: Boolean
    """
    if self._autoFixNames:
        name = self._fixName(name)
    # dict.has_key() was removed in Python 3; the 'in' operator works on
    # both Python 2 and 3.
    return name in self._fieldAttrDict
<SYSTEM_TASK:>
Set value for field in graph.
<END_TASK>
<USER_TASK:>
Description:
def setVal(self, name, val):
    """Set value for field in graph.
    @param name: Field name.
    @param val:  Value for field.
    """
    # Normalize the field name first when automatic fixing is enabled.
    key = self._fixName(name) if self._autoFixNames else name
    self._fieldValDict[key] = val
<SYSTEM_TASK:>
Returns value list for Munin Graph
<END_TASK>
<USER_TASK:>
Description:
def getVals(self):
    """Returns value list for Munin Graph
    @return: List of name-value pairs, in field declaration order;
             fields with no stored value map to None.
    """
    pairs = []
    for field in self._fieldNameList:
        pairs.append((field, self._fieldValDict.get(field)))
    return pairs
<SYSTEM_TASK:>
Return dictionary of Traffic Stats for Network Interfaces.
<END_TASK>
<USER_TASK:>
Description:
def getIfStats(self):
    """Return dictionary of Traffic Stats for Network Interfaces.
    @return: Nested dictionary of statistics for each interface.
    @raise IOError: If the interface stats file cannot be read.
    """
    info_dict = {}
    try:
        # The with-block guarantees the file handle is closed even when
        # read() fails; the original leaked fp on a failed read.
        with open(ifaceStatsFile, 'r') as fp:
            data = fp.read()
    except (IOError, OSError):
        # Narrowed from a bare except, which also hid programming errors.
        raise IOError('Failed reading interface stats from file: %s'
                      % ifaceStatsFile)
    # Column order follows the /proc/net/dev-style layout: 8 RX counters
    # followed by 8 TX counters.
    fields = ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
              'rxframe', 'rxcompressed', 'rxmulticast',
              'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
              'txcolls', 'txcarrier', 'txcompressed')
    for line in data.splitlines():
        # Each stats line looks like "  eth0: 123 456 ...".
        mobj = re.match(r'^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            counters = [int(x) for x in mobj.group(2).split()]
            info_dict[iface] = dict(zip(fields, counters))
    return info_dict
<SYSTEM_TASK:>
Returns the number of TCP endpoints discriminated by status.
<END_TASK>
<USER_TASK:>
Description:
def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False,
                         **kwargs):
    """Returns the number of TCP endpoints discriminated by status.
    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param include_listen: Include listening ports in output if True.
    @param **kwargs: Filtering expressions forwarded to getStats(); each
                     keyword names a field, optionally suffixed with
                     _ic / _regex / _ic_regex for case-insensitive or
                     regex matching.
    @return: Dictionary mapping connection status to the
             number of endpoints.
    """
    status_dict = {}
    result = self.getStats(tcp=True, udp=False,
                           include_listen=include_listen,
                           ipv4=ipv4, ipv6=ipv6,
                           **kwargs)
    for stat in result['stats']:
        # Rows filtered out upstream may be represented as None.
        if stat is None:
            continue
        status = stat[8].lower()
        status_dict[status] = status_dict.get(status, 0) + 1
    return status_dict
<SYSTEM_TASK:>
Returns TCP connection counts for each local port.
<END_TASK>
<USER_TASK:>
Description:
def getTCPportConnCount(self, ipv4=True, ipv6=True, resolve_ports=False,
                        **kwargs):
    """Returns TCP connection counts for each local port.
    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param resolve_ports: Resolve numeric ports to names if True.
    @param **kwargs: Filtering expressions forwarded to getStats(); each
                     keyword names a field, optionally suffixed with
                     _ic / _regex / _ic_regex for case-insensitive or
                     regex matching.
    @return: Dictionary mapping port number or name to the
             number of established connections.
    """
    port_dict = {}
    result = self.getStats(tcp=True, udp=False,
                           include_listen=False, ipv4=ipv4,
                           ipv6=ipv6, resolve_ports=resolve_ports,
                           **kwargs)
    stats = result['stats']
    for stat in stats:
        # Guard against filtered-out rows, consistent with
        # getTCPportConnStatus().
        if stat is None:
            continue
        if stat[8] == 'ESTABLISHED':
            # BUG FIX: the original read port_dict.get(5, 0) — a literal
            # key 5 instead of the local port stat[5] — so every port's
            # count was stuck at 1.
            port_dict[stat[5]] = port_dict.get(stat[5], 0) + 1
    return port_dict
<SYSTEM_TASK:>
Computes proportion of words recalled
<END_TASK>
<USER_TASK:>
Description:
def accuracy_helper(egg, match='exact', distance='euclidean',
                    features=None):
    """
    Computes proportion of words recalled
    Parameters
    ----------
    egg : quail.Egg
        Data to analyze
    match : str (exact, best or smooth)
        Matching approach to compute recall matrix. If exact, the presented and
        recalled items must be identical (default). If best, the recalled item
        that is most similar to the presented items will be selected. If smooth,
        a weighted average of all presented items will be used.
    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches.
    features : str or list, optional
        Feature(s) used for matching; forwarded to recall_matrix.
    Returns
    ----------
    prop_recalled : numpy array
        proportion of words recalled
    """
    def acc(lst):
        # Unique non-negative entries correspond to correct recalls from
        # the current list.
        return len([i for i in np.unique(lst) if i >= 0]) / (egg.list_length)
    opts = dict(match=match, distance=distance, features=features)
    # BUG FIX: the original compared strings with 'is', which relies on
    # CPython string interning and is not a correctness guarantee; use ==.
    if match == 'exact':
        opts.update({'features': 'item'})
    recmat = recall_matrix(egg, **opts)
    if match in ['exact', 'best']:
        result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.mean(recmat, axis=1)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')
    return np.nanmean(result, axis=0)
<SYSTEM_TASK:>
Establish connection to PostgreSQL Database.
<END_TASK>
<USER_TASK:>
Description:
def _connect(self):
    """Establish connection to PostgreSQL Database."""
    # Use explicit connection parameters when configured; an empty DSN
    # lets libpq fall back to its environment defaults (PGHOST, PGUSER,
    # PGDATABASE, ...).
    if self._connParams:
        self._conn = psycopg2.connect(**self._connParams)
    else:
        self._conn = psycopg2.connect('')
    try:
        # Newer psycopg2 exposes server parameters directly on the
        # connection object.
        ver_str = self._conn.get_parameter_status('server_version')
    except AttributeError:
        # Older psycopg2 without get_parameter_status().
        # NOTE(review): assumes self.getParam performs the equivalent
        # server-side lookup — confirm against the rest of the class.
        ver_str = self.getParam('server_version')
    self._version = util.SoftwareVersion(ver_str)
<SYSTEM_TASK:>
Utility method that returns database stats as a nested dictionary.
<END_TASK>
<USER_TASK:>
Description:
def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
""" |
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats |
<SYSTEM_TASK:>
Utility method that returns totals for database statistics.
<END_TASK>
<USER_TASK:>
Description:
def _createTotalsDict(self, headers, rows):
"""Utility method that returns totals for database statistics.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Dictionary of totals for each statistics column.
""" |
totals = [sum(col) for col in zip(*rows)[1:]]
return dict(zip(headers[1:], totals)) |
<SYSTEM_TASK:>
Executes simple query which returns a single column.
<END_TASK>
<USER_TASK:>
Description:
def _simpleQuery(self, query):
    """Executes simple query which returns a single column.
    @param query: Query string.
    @return: Query result string.
    """
    cur = self._conn.cursor()
    cur.execute(query)
    row = cur.fetchone()
    # Only the first column of the first row is used.
    # NOTE(review): util.parse_value presumably normalizes the raw value
    # (e.g. numeric strings) — confirm against the util module.
    return util.parse_value(row[0])
<SYSTEM_TASK:>
Returns dictionary with number of connections for each database.
<END_TASK>
<USER_TASK:>
Description:
def getConnectionStats(self):
    """Returns dictionary with number of connections for each database.
    @return: Dictionary of database connection statistics.
    """
    cur = self._conn.cursor()
    cur.execute("""SELECT datname,numbackends FROM pg_stat_database;""")
    rows = cur.fetchall()
    # Rows are (datname, numbackends) pairs, which map directly to a dict.
    return dict(rows) if rows else {}
<SYSTEM_TASK:>
Returns database block read, transaction and tuple stats for each
<END_TASK>
<USER_TASK:>
Description:
def getDatabaseStats(self):
    """Returns database block read, transaction and tuple stats for each
    database.
    @return: Nested dictionary of stats keyed on database name, plus a
             'totals' entry summing each counter across databases.
    """
    # Order must match the SELECT list built below; disk_size is appended
    # via pg_database_size() rather than read from pg_stat_database.
    headers = ('datname', 'numbackends', 'xact_commit', 'xact_rollback',
               'blks_read', 'blks_hit', 'tup_returned', 'tup_fetched',
               'tup_inserted', 'tup_updated', 'tup_deleted', 'disk_size')
    cur = self._conn.cursor()
    cur.execute("SELECT %s, pg_database_size(datname) FROM pg_stat_database;"
                % ",".join(headers[:-1]))
    rows = cur.fetchall()
    # Same rows feed both the per-database breakdown and the totals.
    dbstats = self._createStatsDict(headers, rows)
    totals = self._createTotalsDict(headers, rows)
    return {'databases': dbstats, 'totals': totals}
<SYSTEM_TASK:>
Returns the number of active lock discriminated by lock mode.
<END_TASK>
<USER_TASK:>
Description:
def getLockStatsMode(self):
    """Returns the number of active lock discriminated by lock mode.
    @return: : Dictionary of stats.
    """
    # Pre-seed every known lock mode with zero so callers always see the
    # complete counter set in both the 'all' and 'wait' buckets.
    info_dict = {'all': dict(zip(self.lockModes, (0,) * len(self.lockModes))),
                 'wait': dict(zip(self.lockModes, (0,) * len(self.lockModes)))}
    cur = self._conn.cursor()
    # TRIM(mode, 'Lock') strips leading/trailing characters drawn from the
    # set {'L','o','c','k'}; in practice this removes the "Lock" suffix
    # from pg_locks mode names (e.g. "AccessShareLock" -> "AccessShare").
    cur.execute("SELECT TRIM(mode, 'Lock'), granted, COUNT(*) FROM pg_locks "
                "GROUP BY TRIM(mode, 'Lock'), granted;")
    rows = cur.fetchall()
    for (mode, granted, cnt) in rows:
        info_dict['all'][mode] += cnt
        # granted = false means a backend is still waiting for this lock.
        if not granted:
            info_dict['wait'][mode] += cnt
    return info_dict
<SYSTEM_TASK:>
Returns the number of active lock discriminated by database.
<END_TASK>
<USER_TASK:>
Description:
def getLockStatsDB(self):
    """Returns the number of active lock discriminated by database.
    @return: : Dictionary of stats.
    """
    all_locks = {}
    waiting = {}
    cur = self._conn.cursor()
    cur.execute("SELECT d.datname, l.granted, COUNT(*) FROM pg_database d "
                "JOIN pg_locks l ON d.oid=l.database "
                "GROUP BY d.datname, l.granted;")
    for (db, granted, cnt) in cur.fetchall():
        all_locks[db] = all_locks.get(db, 0) + cnt
        # Non-granted rows are backends still waiting on the lock.
        if not granted:
            waiting[db] = waiting.get(db, 0) + cnt
    return {'all': all_locks, 'wait': waiting}
<SYSTEM_TASK:>
Establish connection to MySQL Database.
<END_TASK>
<USER_TASK:>
Description:
def _connect(self):
    """Establish connection to MySQL Database."""
    params = self._connParams
    # Fall back to MySQLdb's own defaults when no parameters are set.
    if params:
        self._conn = MySQLdb.connect(**params)
    else:
        self._conn = MySQLdb.connect('')
<SYSTEM_TASK:>
Returns list of supported storage engines.
<END_TASK>
<USER_TASK:>
Description:
def getStorageEngines(self):
    """Returns list of supported storage engines.
    @return: List of storage engine names (lower-case); only engines whose
             Support column is YES or DEFAULT are included.
    """
    cur = self._conn.cursor()
    cur.execute("""SHOW STORAGE ENGINES;""")
    rows = cur.fetchall()
    engines = []
    for row in rows or ():
        if row[1] in ('YES', 'DEFAULT'):
            engines.append(row[0].lower())
    return engines
<SYSTEM_TASK:>
Returns number of processes discriminated by state.
<END_TASK>
<USER_TASK:>
Description:
def getProcessStatus(self):
    """Returns number of processes discriminated by state.
    @return: Dictionary mapping process state to number of processes.
    """
    info_dict = {}
    cur = self._conn.cursor()
    cur.execute("""SHOW FULL PROCESSLIST;""")
    rows = cur.fetchall()
    for row in rows or ():
        raw_state = row[6]
        # Empty string means an idle connection; NULL is reported by the
        # server for some internal threads.
        if raw_state == '':
            state = 'idle'
        elif raw_state is None:
            state = 'other'
        else:
            state = str(raw_state).replace(' ', '_').lower()
        info_dict[state] = info_dict.get(state, 0) + 1
    return info_dict
<SYSTEM_TASK:>
Returns number of processes discriminated by database name.
<END_TASK>
<USER_TASK:>
Description:
def getProcessDatabase(self):
    """Returns number of processes discriminated by database name.
    @return: Dictionary mapping database name to number of processes.
    """
    info_dict = {}
    cur = self._conn.cursor()
    cur.execute("""SHOW FULL PROCESSLIST;""")
    rows = cur.fetchall()
    # Column 3 of the processlist is the database of the connection.
    for row in rows or ():
        db = row[3]
        info_dict[db] = info_dict.get(db, 0) + 1
    return info_dict
<SYSTEM_TASK:>
Loads eggs, fried eggs and example data
<END_TASK>
<USER_TASK:>
Description:
def load(filepath, update=True):
    """
    Loads eggs, fried eggs and example data
    Parameters
    ----------
    filepath : str
        Location of file, or one of the bundled dataset names
        'automatic'/'example', 'manual', 'naturalistic'
    update : bool
        If true, updates egg to latest format
    Returns
    ----------
    data : quail.Egg or quail.FriedEgg
        Data loaded from disk
    """
    base = os.path.dirname(os.path.abspath(__file__))
    if filepath in ('automatic', 'example'):
        return load_egg(base + '/data/automatic.egg')
    elif filepath == 'manual':
        return load_egg(base + '/data/manual.egg', update=False)
    elif filepath == 'naturalistic':
        # BUG FIX: the original computed fpath here but never returned,
        # falling through to the extension checks and raising ValueError.
        # update=False mirrors the 'manual' bundled dataset; confirm the
        # naturalistic egg does not need format updating.
        return load_egg(base + '/data/naturalistic.egg', update=False)
    elif filepath.split('.')[-1] == 'egg':
        return load_egg(filepath, update=update)
    elif filepath.split('.')[-1] == 'fegg':
        return load_fegg(filepath, update=False)
    else:
        raise ValueError('Could not load file.')
<SYSTEM_TASK:>
Loads example data
<END_TASK>
<USER_TASK:>
Description:
def load_example_data(dataset='automatic'):
    """
    Loads example data
    The automatic and manual example data are eggs containing 30 subjects who completed a free
    recall experiment as described here: https://psyarxiv.com/psh48/. The subjects
    studied 8 lists of 16 words each and then performed a free recall test.
    The naturalistic example data is an egg containing 17 subjects who viewed and verbally
    recounted an episode of the BBC series Sherlock, as described here:
    https://www.nature.com/articles/nn.4450.
    Parameters
    ----------
    dataset : str
        The dataset to load. Can be 'automatic', 'manual', or 'naturalistic'. The free recall
        audio recordings for the 'automatic' dataset was transcribed by Google
        Cloud Speech and the 'manual' dataset was transcribed by humans.
    Returns
    ----------
    data : quail.Egg
        Example data
    """
    # can only be auto, manual or naturalistic
    assert dataset in ['automatic', 'manual', 'naturalistic'], "Dataset can only be automatic, manual, or naturalistic"
    fpath = os.path.dirname(os.path.abspath(__file__)) + '/data/' + dataset + '.egg'
    if dataset == 'naturalistic':
        # naturalistic egg is stored as an HDF5 file of Egg kwargs
        egg = Egg(**dd.io.load(fpath))
    else:
        # open pickled egg
        try:
            with open(fpath, 'rb') as handle:
                egg = pickle.load(handle)
        except Exception:
            # BUG FIX: narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit. Fall back to HDF5 layout when
            # unpickling fails.
            f = dd.io.load(fpath)
            egg = Egg(pres=f['pres'], rec=f['rec'], dist_funcs=f['dist_funcs'],
                      subjgroup=f['subjgroup'], subjname=f['subjname'],
                      listgroup=f['listgroup'], listname=f['listname'],
                      date_created=f['date_created'])
    return egg.crack()
<SYSTEM_TASK:>
Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
<END_TASK>
<USER_TASK:>
Description:
def spsolve(A, b):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[0]
        If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
    """
    # Factorize A once via UMFPACK and back-substitute against b.
    x = UmfpackLU(A).solve(b)
    if b.ndim == 2 and b.shape[1] == 1:
        # compatibility with scipy.sparse.spsolve quirk: a single-column
        # matrix RHS yields a flat vector result.
        return x.ravel()
    else:
        return x
<SYSTEM_TASK:>
Solve linear equation A x = b for x
<END_TASK>
<USER_TASK:>
Description:
def solve(self, b):
    """
    Solve linear equation A x = b for x
    Parameters
    ----------
    b : ndarray
        Right-hand side of the matrix equation. Can be vector or a matrix.
    Returns
    -------
    x : ndarray
        Solution to the matrix equation
    Raises
    ------
    ValueError
        If b's leading dimension does not match A's column count.
    """
    # Sparse right-hand sides are densified; UMFPACK's solve call below
    # operates on dense vectors.
    if isspmatrix(b):
        b = b.toarray()
    if b.shape[0] != self._A.shape[1]:
        raise ValueError("Shape of b is not compatible with that of A")
    # Flatten any trailing dimensions into columns so the RHS is 2-D.
    b_arr = asarray(b, dtype=self._A.dtype).reshape(b.shape[0], -1)
    x = np.zeros((self._A.shape[0], b_arr.shape[1]), dtype=self._A.dtype)
    # UMFPACK solves one RHS vector at a time, so iterate over columns.
    # NOTE(review): autoTranspose presumably compensates for the CSR/CSC
    # storage orientation of self._A — confirm against the umfpack wrapper.
    for j in range(b_arr.shape[1]):
        x[:,j] = self.umf.solve(UMFPACK_A, self._A, b_arr[:,j], autoTranspose=True)
    # Restore the original trailing shape of b on the solution.
    return x.reshape((self._A.shape[0],) + b.shape[1:])
<SYSTEM_TASK:>
Solve linear equation of the form A X = B. Where B and X are sparse matrices.
<END_TASK>
<USER_TASK:>
Description:
def solve_sparse(self, B):
    """
    Solve linear equation of the form A X = B. Where B and X are sparse matrices.
    Parameters
    ----------
    B : any scipy.sparse matrix
        Right-hand side of the matrix equation.
        Note: it will be converted to csc_matrix via `.tocsc()`.
    Returns
    -------
    X : csc_matrix
        Solution to the matrix equation as a csc_matrix
    """
    B = B.tocsc()
    cols = []
    # Solve column-by-column. range() replaces the Python-2-only xrange()
    # so the method also runs on Python 3; behavior is identical.
    for j in range(B.shape[1]):
        cols.append(csc_matrix(self.solve(B[:, j])))
    return hstack(cols)
<SYSTEM_TASK:>
Computes recall matrix given list of presented and list of recalled words
<END_TASK>
<USER_TASK:>
Description:
def recall_matrix(egg, match='exact', distance='euclidean', features=None):
    """
    Computes recall matrix given list of presented and list of recalled words
    Parameters
    ----------
    egg : quail.Egg
        Data to analyze
    match : str (exact, best or smooth)
        Matching approach to compute recall matrix. If exact, the presented and
        recalled items must be identical (default). If best, the recalled item
        that is most similar to the presented items will be selected. If smooth,
        a weighted average of all presented items will be used.
    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches.
    features : str or list, optional
        Feature(s) used for matching; inferred from the egg when omitted.
    Returns
    ----------
    recall_matrix : list of lists of ints
        each integer represents the presentation position of the recalled word in a given list in order of recall
        0s represent recalled words not presented
        negative ints represent words recalled from previous lists
    """
    if match in ['best', 'smooth']:
        if not features:
            features = [k for k, v in egg.pres.loc[0][0].values[0].items()
                        if k != 'item']
            if not features:
                # BUG FIX: the original executed raise('...'), which calls
                # a str object and dies with TypeError instead of raising
                # a meaningful exception.
                raise ValueError('No features found. Cannot match with best or smooth strategy')
        if not isinstance(features, list):
            features = [features]
    # BUG FIX: string comparison via '==' instead of identity ('is').
    if match == 'exact':
        features = ['item']
        return _recmat_exact(egg.pres, egg.rec, features)
    else:
        return _recmat_smooth(egg.pres, egg.rec, features, distance, match)
<SYSTEM_TASK:>
Read and parse Asterisk Manager Interface Greeting to determine and
<END_TASK>
<USER_TASK:>
Description:
def _getGreeting(self):
    """Read and parse Asterisk Manager Interface Greeting to determine and
    set Manager Interface version.
    @raise Exception: If the AMI version banner cannot be parsed.
    """
    # The AMI banner is a single line of the form
    # "Asterisk Call Manager/<version>\r\n".
    greeting = self._conn.read_until("\r\n", connTimeout)
    mobj = re.match('Asterisk Call Manager\/([\d\.]+)\s*$', greeting)
    if mobj:
        self._ami_version = util.SoftwareVersion(mobj.group(1))
    else:
        raise Exception("Asterisk Manager Interface version cannot be determined.")
<SYSTEM_TASK:>
Query Asterisk Manager Interface for Asterisk Version to configure
<END_TASK>
<USER_TASK:>
Description:
def _initAsteriskVersion(self):
    """Query Asterisk Manager Interface for Asterisk Version to configure
    system for compatibility with multiple versions.
    CLI Command - core show version
    @raise Exception: If the Asterisk version cannot be parsed.
    """
    # Older AMI versions used the bare "show version" command; newer ones
    # require the "core" prefix.
    if self._ami_version > util.SoftwareVersion('1.0'):
        cmd = "core show version"
    else:
        cmd = "show version"
    cmdresp = self.executeCommand(cmd)
    # Accepts both release builds ("Asterisk 1.8.3") and SVN branch
    # builds ("Asterisk SVN-branch-1.8-...").
    mobj = re.match('Asterisk\s*(SVN-branch-|\s)(\d+(\.\d+)*)', cmdresp)
    if mobj:
        self._asterisk_version = util.SoftwareVersion(mobj.group(2))
    else:
        raise Exception('Asterisk version cannot be determined.')
<SYSTEM_TASK:>
Query Asterisk Manager Interface to initialize internal list of
<END_TASK>
<USER_TASK:>
Description:
def _initChannelTypesList(self):
    """Query Asterisk Manager Interface to initialize internal list of
    supported channel types.
    CLI Command - core show channeltypes
    """
    # "core" prefix required from Asterisk 1.4 onwards.
    if self.checkVersion('1.4'):
        cmd = "core show channeltypes"
    else:
        cmd = "show channeltypes"
    cmdresp = self.executeCommand(cmd)
    self._chantypes = set()
    # Skip the two header lines of the table, then take the channel type
    # name from the first column of each row; the yes/no columns are the
    # table's flag columns.
    for line in cmdresp.splitlines()[2:]:
        mobj = re.match('\s*(\S+)\s+.*\s+(yes|no)\s+', line)
        if mobj:
            self._chantypes.add(mobj.group(1).lower())
<SYSTEM_TASK:>
Returns True if mod is among the loaded modules.
<END_TASK>
<USER_TASK:>
Description:
def hasModule(self, mod):
    """Returns True if mod is among the loaded modules.
    @param mod: Module name.
    @return: Boolean
    """
    # Populate the module list lazily on first use.
    modules = self._modules
    if modules is None:
        self._initModuleList()
        modules = self._modules
    return mod in modules
<SYSTEM_TASK:>
Returns True if app is among the loaded modules.
<END_TASK>
<USER_TASK:>
Description:
def hasApplication(self, app):
    """Returns True if app is among the loaded applications.
    @param app: Application name.
    @return: Boolean
    """
    # Populate the application list lazily on first use.
    apps = self._applications
    if apps is None:
        self._initApplicationList()
        apps = self._applications
    return app in apps
<SYSTEM_TASK:>
Returns True if chan is among the supported channel types.
<END_TASK>
<USER_TASK:>
Description:
def hasChannelType(self, chan):
    """Returns True if chan is among the supported channel types.
    @param chan: Channel type name.
    @return: Boolean
    """
    # Populate the channel type set lazily on first use.
    chantypes = self._chantypes
    if chantypes is None:
        self._initChannelTypesList()
        chantypes = self._chantypes
    return chan in chantypes
<SYSTEM_TASK:>
Query Asterisk Manager Interface for Channel Stats.
<END_TASK>
<USER_TASK:>
Description:
def getChannelStats(self, chantypes=('dahdi', 'zap', 'sip', 'iax2', 'local')):
    """Query Asterisk Manager Interface for Channel Stats.
    CLI Command - core show channels
    @param chantypes: Tuple of channel type names to count.
    @return: Dictionary of statistics counters for channels.
             Number of active channels for each channel type.
    """
    # "core" prefix required from Asterisk 1.4 onwards.
    if self.checkVersion('1.4'):
        cmd = "core show channels"
    else:
        cmd = "show channels"
    cmdresp = self.executeCommand(cmd)
    info_dict ={}
    # Initialize counters: zap and dahdi are folded into a shared 'dahdi'
    # bucket, with 'mix' counting DAHDI pseudo (conference mixing) channels.
    for chanstr in chantypes:
        chan = chanstr.lower()
        if chan in ('zap', 'dahdi'):
            info_dict['dahdi'] = 0
            info_dict['mix'] = 0
        else:
            info_dict[chan] = 0
    for k in ('active_calls', 'active_channels', 'calls_processed'):
        info_dict[k] = 0
    # Channel lines look like "SIP/peer-0001..."; match "<type>/<id>".
    regexstr = ('(%s)\/(\w+)' % '|'.join(chantypes))
    for line in cmdresp.splitlines():
        mobj = re.match(regexstr,
                        line, re.IGNORECASE)
        if mobj:
            chan_type = mobj.group(1).lower()
            chan_id = mobj.group(2).lower()
            if chan_type == 'dahdi' or chan_type == 'zap':
                # DAHDI "pseudo" channels are mixing channels, not calls.
                if chan_id == 'pseudo':
                    info_dict['mix'] += 1
                else:
                    info_dict['dahdi'] += 1
            else:
                info_dict[chan_type] += 1
            continue
        # Summary lines, e.g. "2 active channels" / "1 active call" /
        # "5 calls processed".
        mobj = re.match('(\d+)\s+(active channel|active call|calls processed)',
                        line, re.IGNORECASE)
        if mobj:
            if mobj.group(2) == 'active channel':
                info_dict['active_channels'] = int(mobj.group(1))
            elif mobj.group(2) == 'active call':
                info_dict['active_calls'] = int(mobj.group(1))
            elif mobj.group(2) == 'calls processed':
                info_dict['calls_processed'] = int(mobj.group(1))
            continue
    return info_dict
<SYSTEM_TASK:>
Query Asterisk Manager Interface for Trunk Stats.
<END_TASK>
<USER_TASK:>
Description:
def getTrunkStats(self, trunkList):
    """Query Asterisk Manager Interface for Trunk Stats.
    CLI Command - core show channels
    @param trunkList: List of tuples of one of the two following types:
                      (Trunk Name, Regular Expression)
                      (Trunk Name, Regular Expression, MIN, MAX)
                      The 4-tuple form requires the regex to define a
                      named group 'num'; a channel counts only when
                      MIN <= num <= MAX.
    @return: Dictionary of trunk utilization statistics.
    """
    # Compile each trunk's regex once, up front; counters start at zero.
    re_list = []
    info_dict = {}
    for filt in trunkList:
        info_dict[filt[0]] = 0
        re_list.append(re.compile(filt[1], re.IGNORECASE))
    # "core" prefix required from Asterisk 1.4 onwards.
    if self.checkVersion('1.4'):
        cmd = "core show channels"
    else:
        cmd = "show channels"
    cmdresp = self.executeCommand(cmd)
    for line in cmdresp.splitlines():
        # Try every trunk pattern against every channel line.
        for idx in range(len(re_list)):
            recomp = re_list[idx]
            trunkid = trunkList[idx][0]
            mobj = recomp.match(line)
            if mobj:
                if len(trunkList[idx]) == 2:
                    info_dict[trunkid] += 1
                    continue
                elif len(trunkList[idx]) == 4:
                    # Range-filtered trunk: count only when the captured
                    # 'num' group falls inside [MIN, MAX].
                    num = mobj.groupdict().get('num')
                    if num is not None:
                        (vmin,vmax) = trunkList[idx][2:4]
                        if int(num) >= int(vmin) and int(num) <= int(vmax):
                            info_dict[trunkid] += 1
                            continue
    return info_dict
<SYSTEM_TASK:>
A function to simulate a list
<END_TASK>
<USER_TASK:>
Description:
def simulate_list(nwords=16, nrec=10, ncats=4):
    """A function to simulate a list
    @param nwords: Number of words to sample for the simulated list.
    @param nrec: Number of recalls (currently unused).
    @param ncats: Number of categories (currently unused).
    @return: DataFrame of sampled wordpool rows with a random RGB COLOR
             column added.
    """
    # load wordpool
    wp = pd.read_csv('data/cut_wordpool.csv')
    # get one list; the wordpool is organized into 16 GROUPs.
    wp = wp[wp['GROUP'] == np.random.choice(list(range(16)), 1)[0]].sample(nwords)
    # Generalized from a hard-coded 16 to nwords so the sample size and
    # color count follow the requested list length.
    wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)]
                   for i in range(nwords)]
    # BUG FIX: the original built the list but never returned it.
    return wp
<SYSTEM_TASK:>
Utility method to check if a storage engine is included in graphs.
<END_TASK>
<USER_TASK:>
Description:
def engineIncluded(self, name):
    """Utility method to check if a storage engine is included in graphs.
    @param name: Name of storage engine.
    @return: Returns True if included in graphs, False otherwise.
    """
    # Lazily fetch the server's storage engine list on first call.
    if self._engines is None:
        self._engines = self._dbconn.getStorageEngines()
    # The engine must pass the user-configured 'engine' filter AND be
    # actually available on the server.
    return self.envCheckFilter('engine', name) and name in self._engines
<SYSTEM_TASK:>
Connects via a RS-485 to Ethernet adapter.
<END_TASK>
<USER_TASK:>
Description:
def connect(self, host, port):
    """Connects via a RS-485 to Ethernet adapter.
    @param host: Hostname or IP address of the adapter.
    @param port: TCP port the adapter listens on.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    # Wrap the one socket in separate buffered binary reader/writer
    # file objects for the protocol layer.
    self._reader = sock.makefile(mode='rb')
    self._writer = sock.makefile(mode='wb')
<SYSTEM_TASK:>
Returns a set containing the enabled states.
<END_TASK>
<USER_TASK:>
Description:
def states(self):
    """Returns the currently enabled states as a list of States members."""
    enabled = [state for state in States if state.value & self._states != 0]
    # Low-speed filter operation is signalled by the FILTER bit flashing
    # rather than by a dedicated state bit.
    if (self._flashing_states & States.FILTER) != 0:
        enabled.append(States.FILTER_LOW_SPEED)
    return enabled
<SYSTEM_TASK:>
Returns True if the specified state is enabled.
<END_TASK>
<USER_TASK:>
Description:
def get_state(self, state):
    """Returns True if the specified state is enabled.
    @param state: Member of the States enumeration to query.
    @return: Boolean
    """
    # Check to see if we have a change request pending; if we do
    # return the value we expect it to change to.
    for data in list(self._send_queue.queue):
        desired_states = data['desired_states']
        for desired_state in desired_states:
            if desired_state['state'] == state:
                return desired_state['enabled']
    if state == States.FILTER_LOW_SPEED:
        # Low-speed filter operation is signalled by the FILTER bit
        # flashing rather than by a dedicated state bit.
        return (States.FILTER.value & self._flashing_states) != 0
    return (state.value & self._states) != 0
<SYSTEM_TASK:>
Decorates a function by tracing the begining and
<END_TASK>
<USER_TASK:>
Description:
def trace(function, *args, **k) :
    """Decorates a function by tracing the begining and
    end of the function execution, if doTrace global is True"""
    # Log entry with the arguments, run the wrapped callable, then log
    # exit together with the produced result. Logging is gated on the
    # module-level doTrace flag.
    if doTrace : print ("> "+function.__name__, args, k)
    result = function(*args, **k)
    if doTrace : print ("< "+function.__name__, args, k, "->", result)
    return result
<SYSTEM_TASK:>
Calculate the lower triangular matrix of the Cholesky decomposition of
<END_TASK>
<USER_TASK:>
Description:
def chol(A):
    """
    Calculate the lower triangular matrix of the Cholesky decomposition of
    a symmetric, positive-definite matrix.
    """
    A = np.array(A)
    assert A.shape[0] == A.shape[1], "Input matrix must be square"
    n = len(A)
    L = np.zeros((n, n))
    # Cholesky-Banachiewicz: fill the lower triangle row by row.
    for i in range(n):
        for j in range(i + 1):
            partial = sum(L[i][k] * L[j][k] for k in range(j))
            if i == j:
                # Diagonal entry: sqrt of the remaining mass.
                L[i][j] = (A[i][i] - partial) ** 0.5
            else:
                # Off-diagonal entry: scaled residual.
                L[i][j] = (1.0 / L[j][j] * (A[i][j] - partial))
    return np.array(L)
<SYSTEM_TASK:>
Convert radians to degrees
<END_TASK>
<USER_TASK:>
Description:
def degrees(x):
    """
    Convert radians to degrees
    """
    # Uncertain quantities are converted pointwise over their Monte Carlo
    # samples; plain numbers fall through to numpy directly.
    if not isinstance(x, UncertainFunction):
        return np.degrees(x)
    return UncertainFunction(np.degrees(x._mcpts))
<SYSTEM_TASK:>
Calculate the hypotenuse given two "legs" of a right triangle
<END_TASK>
<USER_TASK:>
Description:
def hypot(x, y):
    """
    Calculate the hypotenuse given two "legs" of a right triangle
    """
    # BUG FIX: the original tested isinstance(x, UncertainFunction) twice,
    # so hypot(plain, uncertain) skipped the uncertain branch entirely and
    # handed an UncertainFunction straight to np.hypot.
    if isinstance(x, UncertainFunction) or isinstance(y, UncertainFunction):
        ufx = to_uncertain_func(x)
        ufy = to_uncertain_func(y)
        mcpts = np.hypot(ufx._mcpts, ufy._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.hypot(x, y)
<SYSTEM_TASK:>
Convert degrees to radians
<END_TASK>
<USER_TASK:>
Description:
def radians(x):
    """
    Convert degrees to radians
    """
    # Uncertain quantities are converted pointwise over their Monte Carlo
    # samples; plain numbers fall through to numpy directly.
    if not isinstance(x, UncertainFunction):
        return np.radians(x)
    return UncertainFunction(np.radians(x._mcpts))
<SYSTEM_TASK:>
Truncate the values to the integer value without rounding
<END_TASK>
<USER_TASK:>
Description:
def trunc(x):
    """
    Truncate the values to the integer value without rounding
    """
    # Uncertain quantities are truncated pointwise over their Monte Carlo
    # samples; plain numbers fall through to numpy directly.
    if not isinstance(x, UncertainFunction):
        return np.trunc(x)
    return UncertainFunction(np.trunc(x._mcpts))
<SYSTEM_TASK:>
Variance value as a result of an uncertainty calculation
<END_TASK>
<USER_TASK:>
Description:
def var(self):
    """
    Variance value as a result of an uncertainty calculation
    """
    # Population variance of the Monte Carlo sample points about their mean.
    deviations = self._mcpts - self.mean
    return np.mean(deviations ** 2)
<SYSTEM_TASK:>
Loads the hat from a picture at path.
<END_TASK>
<USER_TASK:>
Description:
def load_hat(self, path): # pylint: disable=no-self-use
    """Loads the hat from a picture at path.
    Args:
        path: The path to load from
    Returns:
        The hat data.
    Raises:
        ValueError: If no image could be read from path.
    """
    # IMREAD_UNCHANGED preserves the alpha channel needed for compositing.
    hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if hat is None:
        raise ValueError('No hat image found at `{}`'.format(path))
    # OpenCV loads channels as BGRA; reorder to RGBA for the rest of the
    # pipeline.
    b, g, r, a = cv2.split(hat)
    return cv2.merge((r, g, b, a))
<SYSTEM_TASK:>
Uses a haarcascade to detect faces inside an image.
<END_TASK>
<USER_TASK:>
Description:
def find_faces(self, image, draw_box=False):
    """Uses a haarcascade to detect faces inside an image.
    Args:
        image: The image.
        draw_box: If True, the image will be marked with a rectangle.
    Return:
        The faces as returned by OpenCV's detectMultiScale method for
        cascades.
    """
    # detectMultiScale operates on grayscale input; COLOR_RGB2GRAY because
    # images in this pipeline are stored in RGB order (see load_hat).
    frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        frame_gray,
        scaleFactor=1.3,    # image pyramid step between detection scales
        minNeighbors=5,     # neighbor count required to accept a hit
        minSize=(50, 50),   # ignore detections smaller than 50x50 px
        flags=0)
    if draw_box:
        # Draw a 2px green rectangle around each detected face, mutating
        # the caller's image in place.
        for x, y, w, h in faces:
            cv2.rectangle(image, (x, y),
                          (x + w, y + h), (0, 255, 0), 2)
    return faces
<SYSTEM_TASK:>
Marks the object as changed.
<END_TASK>
<USER_TASK:>
Description:
def changed(self, message=None, *args):
    """Marks the object as changed.
    If a `parent` attribute is set, the `changed()` method on the parent
    will be called, propagating the change notification up the chain.
    The message (if provided) will be debug logged.
    """
    if message is not None:
        # Lazy %-formatting keeps formatting cost out of the no-logging path.
        self.logger.debug('%s: %s', self._repr(), message % args)
    self.logger.debug('%s: changed', self._repr())
    if self.parent is not None:
        # Propagate the notification towards the root of the tracked tree.
        self.parent.changed()
    elif isinstance(self, Mutable):
        # Top of the chain: defer to Mutable's own change notification.
        # NOTE(review): Mutable is presumably SQLAlchemy's mutation
        # extension base — confirm against the module's imports.
        super(TrackedObject, self).changed()
<SYSTEM_TASK:>
Decorator for mutation tracker registration.
<END_TASK>
<USER_TASK:>
Description:
def register(cls, origin_type):
    """Decorator for mutation tracker registration.
    The provided `origin_type` is mapped to the decorated class such that
    future calls to `convert()` will convert the object of `origin_type`
    to an instance of the decorated class.
    """
    mapping = cls._type_mapping

    def decorator(tracked_type):
        """Record the decorated class as the tracker for `origin_type`."""
        mapping[origin_type] = tracked_type
        return tracked_type

    return decorator
<SYSTEM_TASK:>
Converts objects to registered tracked types
<END_TASK>
<USER_TASK:>
Description:
def convert(cls, obj, parent):
    """Converts objects to registered tracked types
    This checks the type of the given object against the registered tracked
    types. When a match is found, the given object will be converted to the
    tracked type, its parent set to the provided parent, and returned.
    If its type does not occur in the registered types mapping, the object
    is returned unchanged.
    """
    tracked_type = cls._type_mapping.get(type(obj))
    if tracked_type is None:
        # Unregistered type: hand the object back untouched.
        return obj
    tracked = tracked_type(obj)
    tracked.parent = parent
    return tracked
<SYSTEM_TASK:>
Generator like `convert_iterable`, but for 2-tuple iterators.
<END_TASK>
<USER_TASK:>
Description:
def convert_items(self, items):
    """Generator like `convert_iterable`, but for 2-tuple iterators."""
    # Keys pass through untouched; values are run through convert() with
    # this object as the parent.
    for key, value in items:
        yield key, self.convert(value, self)
<SYSTEM_TASK:>
Convenience method to track either a dict or a 2-tuple iterator.
<END_TASK>
<USER_TASK:>
Description:
def convert_mapping(self, mapping):
    """Convenience method to track either a dict or a 2-tuple iterator."""
    pairs = iteritems(mapping) if isinstance(mapping, dict) else mapping
    return self.convert_items(pairs)
<SYSTEM_TASK:>
Checks if a function in a module was declared in that module.
<END_TASK>
<USER_TASK:>
Description:
def is_mod_function(mod, fun):
    """Return True if `fun` is a function that was declared in module `mod`.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod: the module
        fun: the function
    """
    if not inspect.isfunction(fun):
        return False
    return inspect.getmodule(fun) == mod
<SYSTEM_TASK:>
Checks if a class in a module was declared in that module.
<END_TASK>
<USER_TASK:>
Description:
def is_mod_class(mod, cls):
    """Return True if `cls` is a class that was declared in module `mod`.

    Args:
        mod: the module
        cls: the class
    """
    if not inspect.isclass(cls):
        return False
    return inspect.getmodule(cls) == mod
<SYSTEM_TASK:>
Lists all functions declared in a module.
<END_TASK>
<USER_TASK:>
Description:
def list_functions(mod_name):
    """List the names of all functions declared in module `mod_name`.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod_name: the module name

    Returns:
        A list of names of functions declared in that module.
    """
    module = sys.modules[mod_name]
    names = []
    for candidate in module.__dict__.values():
        if is_mod_function(module, candidate):
            names.append(candidate.__name__)
    return names
<SYSTEM_TASK:>
Lists all classes declared in a module.
<END_TASK>
<USER_TASK:>
Description:
def list_classes(mod_name):
    """List the names of all classes declared in module `mod_name`.

    Args:
        mod_name: the module name

    Returns:
        A list of names of classes declared in that module.
    """
    module = sys.modules[mod_name]
    names = []
    for candidate in module.__dict__.values():
        if is_mod_class(module, candidate):
            names.append(candidate.__name__)
    return names
<SYSTEM_TASK:>
Returns a dictionary which maps function names to line numbers.
<END_TASK>
<USER_TASK:>
Description:
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
    """Map each function name to the 1-based line number where it is defined.

    Args:
        functions: iterable of function names to look up.
        module: the module object whose source is searched.
        searchstr: format string used to build the exact source line to find.

    Returns:
        A dictionary with function names as keys and their line numbers as
        values; names that are not found map to 0.
    """
    source_lines = inspect.getsourcelines(module)[0]
    numbers = {}
    for name in functions:
        needle = searchstr.format(name)
        try:
            numbers[name] = source_lines.index(needle) + 1
        except ValueError:
            print(r'Can not find `{}`'.format(needle))
            numbers[name] = 0
    return numbers
<SYSTEM_TASK:>
Formats the documentation in a nicer way and for notebook cells.
<END_TASK>
<USER_TASK:>
Description:
def format_doc(fun):
    """Formats the documentation in a nicer way and for notebook cells.

    Splits the docstring of the object named `fun` in `cvloop.functions`
    into lines, appends the `__init__` docstring for classes, and turns
    Google-style `Args:` / `Returns:` sections into markdown lists.

    Args:
        fun: name of the function or class inside `cvloop.functions`.

    Returns:
        A list of markdown-formatted documentation lines.
    """
    SEPARATOR = '============================='
    func = cvloop.functions.__dict__[fun]
    doc_lines = ['{}'.format(l).strip() for l in func.__doc__.split('\n')]
    # Append the constructor documentation for classes only.  The previous
    # check `hasattr(func, '__init__')` is True for *every* object (plain
    # functions inherit object.__init__), so functions wrongly picked up
    # the generic "Initialize self..." docstring.
    if isinstance(func, type):
        doc_lines.append(SEPARATOR)
        doc_lines += ['{}'.format(l).strip() for l in
                      func.__init__.__doc__.split('\n')]
    mod_lines = []
    argblock = False
    returnblock = False
    for line in doc_lines:
        if line == SEPARATOR:
            mod_lines.append('\n#### `{}.__init__(...)`:\n\n'.format(fun))
        elif 'Args:' in line:
            argblock = True
            if GENERATE_ARGS:
                mod_lines.append('**{}**\n'.format(line))
        elif 'Returns:' in line:
            returnblock = True
            mod_lines.append('\n**{}**'.format(line))
        elif not argblock and not returnblock:
            mod_lines.append('{}\n'.format(line))
        elif argblock and not returnblock and ':' in line:
            if GENERATE_ARGS:
                mod_lines.append('- *{}:* {}\n'.format(
                    *line.split(':')))
        elif returnblock:
            mod_lines.append(line)
        else:
            mod_lines.append('{}\n'.format(line))
    return mod_lines
<SYSTEM_TASK:>
Main function creates the cvloop.functions example notebook.
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Main function creates the cvloop.functions example notebook.

    Builds a minimal nbformat-4 notebook dict with an intro markdown cell,
    appends a description cell and a code cell for every class and function
    found in `cvloop.functions`, and writes the notebook as JSON to the
    path given as the first command line argument.
    """
    # Skeleton notebook: one markdown intro cell plus nbformat metadata.
    notebook = {
        'cells': [
            {
                'cell_type': 'markdown',
                'metadata': {},
                'source': [
                    '# cvloop functions\n\n',
                    'This notebook shows an overview over all cvloop ',
                    'functions provided in the [`cvloop.functions` module](',
                    'https://github.com/shoeffner/cvloop/blob/',
                    'develop/cvloop/functions.py).'
                ]
            },
        ],
        'nbformat': 4,
        'nbformat_minor': 1,
        'metadata': {
            'language_info': {
                'codemirror_mode': {
                    'name': 'ipython',
                    'version': 3
                },
                'file_extension': '.py',
                'mimetype': 'text/x-python',
                'name': 'python',
                'nbconvert_exporter': 'python',
                'pygments_lexer': 'ipython3',
                'version': '3.5.1+'
            }
        }
    }
    # Collect classes and functions plus the source lines they start at,
    # so the generated cells can link back to the definition.
    classes = list_classes('cvloop.functions')
    functions = list_functions('cvloop.functions')
    line_numbers_cls = get_linenumbers(classes, cvloop.functions,
                                       'class {}:\n')
    line_numbers = get_linenumbers(functions, cvloop.functions)
    for cls in classes:
        line_number = line_numbers_cls[cls]
        notebook['cells'].append(create_description_cell(cls, line_number))
        notebook['cells'].append(create_code_cell(cls, isclass=True))
    for func in functions:
        line_number = line_numbers[func]
        notebook['cells'].append(create_description_cell(func, line_number))
        notebook['cells'].append(create_code_cell(func))
    # Output path comes from the command line (sys.argv[1]).
    with open(sys.argv[1], 'w') as nfile:
        json.dump(notebook, nfile, indent=4)
<SYSTEM_TASK:>
Prepares an axes object for clean plotting.
<END_TASK>
<USER_TASK:>
Description:
def prepare_axes(axes, title, size, cmap=None):
    """Prepare an axes object for clean plotting.

    Removes ticks and labels, fixes the drawing area to `size` with an
    equal aspect ratio, and creates an AxesImage seeded with random
    colors as visual feedback.

    Args:
        axes: The axes object to modify; may be None.
        title: The title (a cmap suffix is appended for string cmaps).
        size: (height, width) of the expected image.
        cmap: Optional custom color map. (Default: None)

    Returns:
        The AxesImage handle, or None when `axes` is None.
    """
    if axes is None:
        return None
    height, width = size[0], size[1]
    if isinstance(cmap, str):
        title = '{} (cmap: {})'.format(title, cmap)
    axes.set_title(title)
    # Fixed extent, square pixels, no axis decorations.
    axes.set_xlim([0, width])
    axes.set_ylim([height, 0])
    axes.set_aspect('equal')
    axes.axis('off')
    # Image artist filled with noise until real frames arrive.
    img = image.AxesImage(axes, cmap=cmap, extent=(0, width, height, 0))
    img.set_data(np.random.random((height, width, 3)))
    axes.add_image(img)
    return img
<SYSTEM_TASK:>
Connects event handlers to the figure.
<END_TASK>
<USER_TASK:>
Description:
def connect_event_handlers(self):
    """Connects event handlers to the figure."""
    canvas = self.figure.canvas
    canvas.mpl_connect('close_event', self.evt_release)
    canvas.mpl_connect('pause_event', self.evt_toggle_pause)
<SYSTEM_TASK:>
Pauses and resumes the video source.
<END_TASK>
<USER_TASK:>
Description:
def evt_toggle_pause(self, *args):  # pylint: disable=unused-argument
    """Toggle the video source between paused and running."""
    timer = self.event_source._timer  # noqa: e501 pylint: disable=protected-access
    if timer is None:
        self.event_source.start()
    else:
        self.event_source.stop()
<SYSTEM_TASK:>
Prints information about the unprocessed image.
<END_TASK>
<USER_TASK:>
Description:
def print_info(self, capture):
    """Print color, dimension and dtype information about the source.

    Consumes one frame from `capture` (and advances `self.frame_offset`)
    to determine the information.

    Args:
        capture: the source to read from.
    """
    self.frame_offset += 1
    success, frame = capture.read()
    if not success:
        print('No source found.')
        return
    channels = frame.shape[2] if len(frame.shape) > 2 else 1
    print('Capture Information')
    print('\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))
    print('\tColor channels: {}'.format(channels))
    print('\tColor range: {}-{}'.format(np.min(frame), np.max(frame)))
    print('\tdtype: {}'.format(frame.dtype))
<SYSTEM_TASK:>
Determines the height and width of the image source.
<END_TASK>
<USER_TASK:>
Description:
def determine_size(self, capture):
    """Determine the (height, width) of the image source.

    If `capture` has a `get` method it is assumed to understand
    `cv2.CAP_PROP_FRAME_WIDTH` / `cv2.CAP_PROP_FRAME_HEIGHT`; otherwise
    one frame is read (advancing `self.frame_offset`).  Falls back to a
    default of 640x480, i.e. (480, 640), when nothing can be determined.

    Args:
        capture: the source to read from.

    Returns:
        A tuple of ints (height, width).
    """
    if capture and hasattr(capture, 'get'):
        return (int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)))
    self.frame_offset += 1
    success, frame = capture.read()
    if success:
        return (int(frame.shape[0]), int(frame.shape[1]))
    return (480, 640)
<SYSTEM_TASK:>
Initializes the drawing of the frames by setting the images to
<END_TASK>
<USER_TASK:>
Description:
def _init_draw(self):
"""Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
""" |
if self.original is not None:
self.original.set_data(np.random.random((10, 10, 3)))
self.processed.set_data(np.random.random((10, 10, 3))) |
<SYSTEM_TASK:>
Reads a frame and converts the color if needed.
<END_TASK>
<USER_TASK:>
Description:
def read_frame(self):
    """Read one frame, converting its color space when configured.

    When the source is exhausted (read() returns False), the animation's
    event source is stopped and the capture released when possible.

    Returns:
        None when the stream ended, otherwise the (possibly color
        converted) frame.
    """
    success, frame = self.capture.read()
    if not success:
        self.event_source.stop()
        try:
            self.capture.release()
        except AttributeError:
            # Source has no release() method; nothing to clean up.
            pass
        return None
    if self.convert_color != -1 and is_color_image(frame):
        return cv2.cvtColor(frame, self.convert_color)
    return frame
<SYSTEM_TASK:>
Annotates the processed axis with given annotations for
<END_TASK>
<USER_TASK:>
Description:
def annotate(self, framedata):
    """Annotates the processed axis with given annotations for
    the provided framedata.

    Annotations are (x, y, frame[, options]) tuples and are assumed to be
    sorted by frame number: iteration stops at the first annotation
    scheduled after `framedata`.

    Args:
        framedata: The current frame number.
    """
    # Remove the artists drawn for the previous frame.
    for artist in self.annotation_artists:
        artist.remove()
    self.annotation_artists = []
    for annotation in self.annotations:
        if annotation[2] > framedata:
            return
        if annotation[2] == framedata:
            pos = annotation[0:2]
            # Start from the defaults; an optional fourth element may
            # override shape, color, size and line width per annotation.
            shape = self.annotations_default['shape']
            color = self.annotations_default['color']
            size = self.annotations_default['size']
            line = self.annotations_default['line']
            if len(annotation) > 3:
                shape = annotation[3].get('shape', shape)
                color = annotation[3].get('color', color)
                size = annotation[3].get('size', size)
                line = annotation[3].get('line', line)
            # Circles need a scalar radius; fall back when a (w, h)
            # pair was supplied.
            if shape == 'CIRC' and hasattr(size, '__len__'):
                size = 30
            # Expand a scalar gray value into an RGB triple.
            if not hasattr(color, '__len__'):
                color = (color,) * 3
            if shape == 'RECT':
                patch = patches.Rectangle((pos[0] - size[0] // 2,
                                           pos[1] - size[1] // 2),
                                          size[0], size[1], fill=False,
                                          lw=line, fc='none', ec=color)
            elif shape == 'CIRC':
                patch = patches.CirclePolygon(pos, radius=size, fc='none',
                                              ec=color, lw=line)
            # NOTE(review): an unknown shape leaves `patch` unbound and
            # raises NameError below — confirm only RECT/CIRC are used.
            self.annotation_artists.append(patch)
            self.axes_processed.add_artist(self.annotation_artists[-1])
<SYSTEM_TASK:>
Reads, processes and draws the frames.
<END_TASK>
<USER_TASK:>
Description:
def _draw_frame(self, framedata):
    """Reads, processes and draws the frames.

    If needed for color maps, conversions to gray scale are performed. In
    case the images are no color images and no custom color maps are
    defined, the colormap `gray` is applied.

    This function is called by TimedAnimation.

    Args:
        framedata: The frame data.
    """
    original = self.read_frame()
    if original is None:
        # Source exhausted: show a final message and stop updating.
        self.update_info(self.info_string(message='Finished.',
                                          frame=framedata))
        return
    if self.original is not None:
        # Process a copy so the unprocessed image can be displayed too.
        processed = self.process_frame(original.copy())
        if self.cmap_original is not None:
            original = to_gray(original)
        elif not is_color_image(original):
            self.original.set_cmap('gray')
        self.original.set_data(original)
    else:
        processed = self.process_frame(original)
    if self.cmap_processed is not None:
        processed = to_gray(processed)
    elif not is_color_image(processed):
        self.processed.set_cmap('gray')
    if self.annotations:
        self.annotate(framedata)
    self.processed.set_data(processed)
    self.update_info(self.info_string(frame=framedata))
<SYSTEM_TASK:>
Updates the figure's suptitle.
<END_TASK>
<USER_TASK:>
Description:
def update_info(self, custom=None):
    """Update the figure's suptitle.

    Args:
        custom: when not None, used verbatim instead of `info_string()`.
    """
    title = custom if custom is not None else self.info_string()
    self.figure.suptitle(title)
<SYSTEM_TASK:>
Returns information about the stream.
<END_TASK>
<USER_TASK:>
Description:
def info_string(self, size=None, message='', frame=-1):
    """Build an information string about the stream.

    Joins size, frame number, and message, omitting unnecessary parts
    (empty messages, frame -1).  Primarily used as the figure suptitle.

    Returns:
        The assembled info string.
    """
    parts = []
    effective_size = size if size is not None else self.size
    if effective_size is not None:
        parts.append('Size: {1}x{0}'.format(*effective_size))
    if frame >= 0:
        parts.append('Frame: {}'.format(frame))
    if message != '':
        parts.append('{}'.format(message))
    return ' '.join(parts)
<SYSTEM_TASK:>
Calculate how many items must be in the collection to satisfy this slice
<END_TASK>
<USER_TASK:>
Description:
def _slice_required_len(slice_obj):
"""
Calculate how many items must be in the collection to satisfy this slice
returns `None` for slices may vary based on the length of the underlying collection
such as `lst[-1]` or `lst[::]`
""" |
if slice_obj.step and slice_obj.step != 1:
return None
# (None, None, *) requires the entire list
if slice_obj.start is None and slice_obj.stop is None:
return None
# Negative indexes are hard without knowing the collection length
if slice_obj.start and slice_obj.start < 0:
return None
if slice_obj.stop and slice_obj.stop < 0:
return None
if slice_obj.stop:
if slice_obj.start and slice_obj.start > slice_obj.stop:
return 0
return slice_obj.stop
return slice_obj.start + 1 |
<SYSTEM_TASK:>
conveniently styles your text as and resets ANSI codes at its end.
<END_TASK>
<USER_TASK:>
Description:
def stylize(text, styles, reset=True):
    """Style `text` with ANSI codes, optionally resetting at its end."""
    prefix = "".join(styles)
    suffix = attr("reset") if reset else ""
    return "{}{}{}".format(prefix, text, suffix)
<SYSTEM_TASK:>
Set or reset attributes
<END_TASK>
<USER_TASK:>
Description:
def attribute(self):
    """Set or reset attributes"""
    # SGR code for each attribute name / numeric alias; the escape
    # prefix and terminator are added once at the end.
    codes = {
        "bold": "1", 1: "1",
        "dim": "2", 2: "2",
        "underlined": "4", 4: "4",
        "blink": "5", 5: "5",
        "reverse": "7", 7: "7",
        "hidden": "8", 8: "8",
        "reset": "0", 0: "0",
        "res_bold": "21", 21: "21",
        "res_dim": "22", 22: "22",
        "res_underlined": "24", 24: "24",
        "res_blink": "25", 25: "25",
        "res_reverse": "27", 27: "27",
        "res_hidden": "28", 28: "28",
    }
    return self.ESC + codes[self.color] + self.END
<SYSTEM_TASK:>
Print 256 foreground colors
<END_TASK>
<USER_TASK:>
Description:
def foreground(self):
    """Print 256 foreground colors"""
    prefix = self.ESC + "38;5;"
    color = self.color
    if str(color).isdigit():
        # Numeric code: translate back to its name via the reverse map.
        self.reverse_dict()
        name = self.reserve_paint[str(color)]
        return prefix + self.paint[name] + self.END
    if color.startswith("#"):
        # Hex color: use the precomputed 256-color approximation.
        return prefix + str(self.HEX) + self.END
    return prefix + self.paint[color] + self.END
<SYSTEM_TASK:>
Perform a reset and check for presence pulse.
<END_TASK>
<USER_TASK:>
Description:
def reset(self, required=False):
    """
    Perform a reset and check for a presence pulse.

    :param bool required: require presence pulse
    """
    # The underlying bus returns True when NO presence pulse was seen.
    no_presence = self._ow.reset()
    if required and no_presence:
        raise OneWireError("No presence pulse found. Check devices and wiring.")
    return not no_presence
<SYSTEM_TASK:>
Scan for devices on the bus and return a list of addresses.
<END_TASK>
<USER_TASK:>
Description:
def scan(self):
    """Scan for devices on the bus and return a list of addresses."""
    found = []
    diff = 65
    rom = False
    for _ in range(0xff):
        rom, diff = self._search_rom(rom, diff)
        if rom:
            # Guard against runaway searches on a noisy bus.
            if len(found) + 1 > self.maximum_devices:
                raise RuntimeError(
                    "Maximum device count of {} exceeded."\
                    .format(self.maximum_devices))
            found.append(OneWireAddress(rom))
        if diff == 0:
            break
    return found
<SYSTEM_TASK:>
Perform the 1-Wire CRC check on the provided data.
<END_TASK>
<USER_TASK:>
Description:
def crc8(data):
    """
    Compute the Dallas/Maxim 1-Wire CRC-8 of the provided data.

    :param bytearray data: 8 byte array representing 64 bit ROM code
    """
    crc = 0
    for byte in data:
        crc ^= byte
        # Polynomial x^8 + x^5 + x^4 + 1, reflected form 0x8C.
        for _ in range(8):
            lsb = crc & 0x01
            crc >>= 1
            if lsb:
                crc ^= 0x8C
        crc &= 0xFF
    return crc
<SYSTEM_TASK:>
Adds various preferences members to preferences.preferences,
<END_TASK>
<USER_TASK:>
Description:
def preferences_class_prepared(sender, *args, **kwargs):
    """
    Adds various preferences members to preferences.preferences,
    thus enabling easy access from code.
    """
    cls = sender
    if not issubclass(cls, Preferences):
        return
    # Give each concrete preferences subclass a singleton manager.
    cls.add_to_class('singleton', SingletonManager())
    # Expose the singleton as preferences.Preferences.<ClassName>.
    accessor = property(lambda _self: cls.singleton.get())
    setattr(preferences.Preferences, cls._meta.object_name, accessor)
<SYSTEM_TASK:>
Make sure there is only a single preferences object per site.
<END_TASK>
<USER_TASK:>
Description:
def site_cleanup(sender, action, instance, **kwargs):
    """
    Make sure there is only a single preferences object per site,
    removing this instance's sites from any other preferences objects.
    """
    if action != 'post_add':
        return
    if not isinstance(instance, Preferences) \
            or not hasattr(instance.__class__, 'objects'):
        return
    conflicts = instance.__class__.objects.filter(
        sites__in=instance.sites.all()
    ).only('id').distinct()
    for other in conflicts:
        if other.id == instance.id:
            continue
        for site in instance.sites.all():
            other.sites.remove(site)
<SYSTEM_TASK:>
Return the first preferences object for the current site.
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
    """
    Return the first preferences object for the current site,
    creating one (linked to the site) when none exists yet.
    """
    queryset = super(SingletonManager, self).get_queryset()
    # Resolve the current site when the sites framework is configured.
    site = (Site.objects.get_current()
            if getattr(settings, 'SITE_ID', None) is not None else None)
    if site is not None:
        queryset = queryset.filter(sites=settings.SITE_ID)
    if not queryset.exists():
        # Lazily create the (per-site) preferences object.
        created = self.model.objects.create()
        if site is not None:
            created.sites.add(site)
    return queryset
<SYSTEM_TASK:>
Return the unmasked overfitting metric for a given transit depth.
<END_TASK>
<USER_TASK:>
Description:
def unmasked(self, depth=0.01):
    """Return the unmasked overfitting metric for a given transit depth."""
    o1 = np.hstack(self._O1)
    o2 = np.hstack(self._O2)
    o3 = np.hstack(self._O3)
    return 1 - (o2 + o3 / depth) / o1
<SYSTEM_TASK:>
Return the current observing season.
<END_TASK>
<USER_TASK:>
Description:
def season(self):
    """
    Return the current observing season.

    For *K2*, this is the observing campaign, while for *Kepler*,
    it is the current quarter.  The value is computed once via the
    mission module and cached on the instance.
    """
    if not hasattr(self, '_season'):
        self._season = self._mission.Season(self.ID)
        # A sequence means the target spans several seasons and the
        # caller must pick one explicitly.
        if hasattr(self._season, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s." %
                self._season)
    return self._season
<SYSTEM_TASK:>
Why is my target not in the EVEREST database?
<END_TASK>
<USER_TASK:>
Description:
def Search(ID, mission='k2'):
    """Why is my target not in the EVEREST database?

    Prints a diagnostic explaining why the given EPIC target is missing
    from the EVEREST catalog (excluded campaign, not a star, no raw pixel
    data, no K2SFF aperture, too saturated, ...).

    Args:
        ID: the EPIC target number.
        mission: the mission name; only 'k2' is supported for now.
    """
    # Only K2 supported for now
    assert mission == 'k2', "Only the K2 mission is supported for now."
    print("Searching for target %d..." % ID)
    # First check if it is in the database
    season = missions.k2.Season(ID)
    if season in [91, 92, [91, 92]]:
        print("Campaign 9 is currently not part of the EVEREST catalog.")
        return
    elif season == 101:
        print("The first half of campaign 10 is not currently part of " +
              "the EVEREST catalog.")
        return
    elif season is not None:
        print("Target is in campaign %d of the EVEREST catalog." % season)
        return
    # Get the kplr object
    star = k2plr_client.k2_star(ID)
    # First check if this is a star
    if star.objtype.lower() != "star":
        print("Target is of type %s, not STAR, " % star.objtype +
              "and is therefore not included in the EVEREST catalog.")
        return
    # Let's try to download the pixel data and see what happens
    try:
        tpf = star.get_target_pixel_files()
    except:
        print("Unable to download the raw pixel files for this target.")
        return
    if len(tpf) == 0:
        print("Raw pixel files are not available for this target. Looks like " +
              "data may not have been collected for it.")
        return
    # Perhaps it's in a campaign we haven't gotten to yet
    if tpf[0].sci_campaign not in missions.k2.SEASONS:
        print("Targets for campaign %d are not yet available."
              % tpf[0].sci_campaign)
        return
    # Let's try to download the K2SFF data
    try:
        k2sff = k2plr.K2SFF(ID)
    except:
        print("Error downloading the K2SFF light curve for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Let's try to get the aperture
    try:
        assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture."
    except:
        print("Unable to retrieve the K2SFF aperture for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Perhaps the star is *super* saturated and we didn't bother
    # de-trending it?
    if star.kp < 8:
        # Bug fix: the %.1f placeholder was never interpolated, so the
        # literal "%.1f" was printed; supply the magnitude explicitly.
        print(("Target has Kp = %.1f and is too saturated " +
               "for proper de-trending with EVEREST.") % star.kp)
        return
    # I'm out of ideas
    print("I'm not sure why this target isn't in the EVEREST catalog." +
          "You can try de-trending it yourself:")
    print("http://faculty.washington.edu/rodluger/everest/pipeline.html")
    return
<SYSTEM_TASK:>
lookup the value of the var_name on the stack of contexts
<END_TASK>
<USER_TASK:>
Description:
def lookup(var_name, contexts=(), start=0):
    """lookup the value of the var_name on the stack of contexts

    :var_name: the variable name to resolve
    :contexts: stack of mapping objects, outermost first
    :start: slots of the stack to consider; non-negative values mean the
        whole stack, negative values trim entries off the top
    :returns: None if not found
    """
    limit = len(contexts) if start >= 0 else start
    for context in reversed(contexts[:limit]):
        try:
            if var_name in context:
                return context[var_name]
        except TypeError:
            # Non-mapping objects may sit on the stack; just skip them.
            continue
    return None
<SYSTEM_TASK:>
convert delimiters to corresponding regular expressions
<END_TASK>
<USER_TASK:>
Description:
def delimiters_to_re(delimiters):
    """Convert an (open, close) delimiter pair to a compiled tag regex."""
    delimiters = tuple(delimiters)
    # Serve from the cache when this pair was compiled before.
    cached = re_delimiters.get(delimiters)
    if cached is not None:
        return cached
    open_tag, close_tag = delimiters
    # Escape every non-alphanumeric character of both delimiters.
    open_tag = ''.join(c if c.isalnum() else '\\' + c for c in open_tag)
    close_tag = ''.join(c if c.isalnum() else '\\' + c for c in close_tag)
    re_tag = re.compile(
        open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + close_tag,
        re.DOTALL)
    re_delimiters[delimiters] = re_tag
    return re_tag
<SYSTEM_TASK:>
Escape text according to self.escape
<END_TASK>
<USER_TASK:>
Description:
def _escape(self, text):
"""Escape text according to self.escape""" |
ret = EMPTYSTRING if text is None else str(text)
if self.escape:
return html_escape(ret)
else:
return ret |
<SYSTEM_TASK:>
lookup value for names like 'a.b.c' and handle filters as well
<END_TASK>
<USER_TASK:>
Description:
def _lookup(self, dot_name, contexts):
    """lookup value for names like 'a.b.c' and handle filters as well

    Resolves an optional relative path prefix ('./', '../'), then a
    dotted attribute/index chain, then applies any '|'-separated filters
    registered on the root token.  Unresolvable names yield None rather
    than raising.
    """
    # process filters
    filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
    dot_name = filters[0]
    filters = filters[1:]
    # should support paths like '../../a.b.c/../d', etc.
    if not dot_name.startswith('.'):
        dot_name = './' + dot_name
    paths = dot_name.split('/')
    last_path = paths[-1]
    # path like '../..' or ./../. etc.
    refer_context = last_path == '' or last_path == '.' or last_path == '..'
    paths = paths if refer_context else paths[:-1]
    # count path level
    level = 0
    for path in paths:
        if path == '..':
            level -= 1
        elif path != '.':
            # ../a.b.c/.. in the middle
            level += len(path.strip('.').split('.'))
    names = last_path.split('.')
    # fetch the correct context
    if refer_context or names[0] == '':
        try:
            value = contexts[level-1]
        except:
            value = None
    else:
        # support {{a.b.c.d.e}} like lookup
        value = lookup(names[0], contexts, level)
    # lookup for variables
    if not refer_context:
        for name in names[1:]:
            try:
                # a.num (a.1, a.2) to access list
                index = parse_int(name)
                name = parse_int(name) if isinstance(value, (list, tuple)) else name
                value = value[name]
            except:
                # not found
                value = None
                break;
    # apply filters; unknown or failing filters are silently skipped
    for f in filters:
        try:
            func = self.root.filters[f]
            value = func(value)
        except:
            continue
    return value
<SYSTEM_TASK:>
Render the children tokens
<END_TASK>
<USER_TASK:>
Description:
def _render_children(self, contexts, partials):
    """Render all child tokens and concatenate their output."""
    return EMPTYSTRING.join(
        child._render(contexts, partials) for child in self.children)
<SYSTEM_TASK:>
Handle any pending relations to the sending model.
<END_TASK>
<USER_TASK:>
Description:
def do_pending_lookups(event, sender, **kwargs):
    """Run any pending relation callbacks for the sending model.

    Sent from class_prepared."""
    key = (sender._meta.app_label, sender._meta.name)
    callbacks = pending_lookups.pop(key, [])
    for callback in callbacks:
        callback(sender)
<SYSTEM_TASK:>
If this isn't the master process, wait for instructions.
<END_TASK>
<USER_TASK:>
Description:
def wait(self):
    """
    If this isn't the master process, wait for instructions.

    Runs a blocking event loop: receives tasks from rank 0, handles the
    special close / function-replacement messages, applies the current
    task function to every other message and sends the result back
    asynchronously with the same tag.
    """
    if self.is_master():
        raise RuntimeError("Master node told to await jobs.")
    status = MPI.Status()
    while True:
        # Event loop.
        # Sit here and await instructions.
        if self.debug:
            print("Worker {0} waiting for task.".format(self.rank))
        # Blocking receive to wait for instructions.
        task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if self.debug:
            print("Worker {0} got task {1} with tag {2}."
                  .format(self.rank, type(task), status.tag))
        # Check if message is special sentinel signaling end.
        # If so, stop.
        if isinstance(task, _close_pool_message):
            if self.debug:
                print("Worker {0} told to quit.".format(self.rank))
            break
        # Check if message is special type containing new function
        # to be applied
        if isinstance(task, _function_wrapper):
            self.function = task.function
            if self.debug:
                print("Worker {0} replaced its task function: {1}."
                      .format(self.rank, self.function))
            continue
        # If not a special message, just run the known function on
        # the input and return it asynchronously.
        result = self.function(task)
        if self.debug:
            print("Worker {0} sending answer {1} with tag {2}."
                  .format(self.rank, type(result), status.tag))
        self.comm.isend(result, dest=0, tag=status.tag)
    # Kill the process?
    if self.exit_on_end:
        sys.exit()
<SYSTEM_TASK:>
Return the log prior given parameter vector `x`.
<END_TASK>
<USER_TASK:>
Description:
def lnprior(x):
    """Return the log prior given parameter vector `x`.

    Flat (log-prior 0) inside the box b in [-1, 1], per in [7, 10],
    t0 in [1978, 1979]; -inf outside.
    """
    per, t0, b = x
    in_bounds = (-1 <= b <= 1) and (7 <= per <= 10) and (1978 <= t0 <= 1979)
    return 0. if in_bounds else -np.inf
<SYSTEM_TASK:>
Initializes snow extension
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app, session=None, parameters=None):
    """Initializes snow extension

    Sets config defaults and determines which client type to use.

    :param app: App passed from constructor or directly to init_app (factory)
    :param session: requests-compatible session to pass along to init_app
    :param parameters: `ParamsBuilder` object passed to `Client` after instantiation
    :raises:
        - ConfigError - if unable to determine client type
    """
    if parameters is not None and not isinstance(parameters, ParamsBuilder):
        raise InvalidUsage("parameters should be a pysnow.ParamsBuilder object, not %r" % type(parameters).__name__)
    self._session = session
    self._parameters = parameters
    # Fill in every config key the extension reads later on.
    defaults = {
        'SNOW_INSTANCE': None,
        'SNOW_HOST': None,
        'SNOW_USER': None,
        'SNOW_PASSWORD': None,
        'SNOW_OAUTH_CLIENT_ID': None,
        'SNOW_OAUTH_CLIENT_SECRET': None,
        'SNOW_USE_SSL': True,
    }
    for key, value in defaults.items():
        app.config.setdefault(key, value)
    conf = app.config
    if conf['SNOW_OAUTH_CLIENT_ID'] and conf['SNOW_OAUTH_CLIENT_SECRET']:
        self._client_type_oauth = True
    elif self._session or (conf['SNOW_USER'] and conf['SNOW_PASSWORD']):
        self._client_type_basic = True
    else:
        raise ConfigError("You must supply user credentials, a session or OAuth credentials to use flask-snow")
<SYSTEM_TASK:>
Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances
<END_TASK>
<USER_TASK:>
Description:
def connection(self):
    """Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances

    Creates a new :class:`pysnow.Client` object if it doesn't exist in the app slice of the context stack

    :returns: :class:`pysnow.Client` object
    """
    # NOTE(review): if `stack.top` is None this raises AttributeError
    # before the None check below ever runs — presumably `ctx` was meant
    # to be `stack.top`; confirm against Flask's app-context behavior.
    ctx = stack.top.app
    if ctx is not None:
        if not hasattr(ctx, 'snow'):
            if self._client_type_oauth:
                if not self._token_updater:
                    warnings.warn("No token updater has been set. Token refreshes will be ignored.")
                client = self._get_oauth_client()
            else:
                client = self._get_basic_client()
            if self._parameters:
                # Set parameters passed on app init
                client.parameters = self._parameters
            # Cache the client on the app context for reuse.
            ctx.snow = client
        return ctx.snow
<SYSTEM_TASK:>
Validate an email with the given key
<END_TASK>
<USER_TASK:>
Description:
def get(self, request, key):
    """Validate an email with the given key.

    Looks up the pending `EmailAddressValidation` for `key`, attaches the
    address to its user, activates that user, and redirects to the user's
    profile; on failure it redirects to `/` with an error message.
    """
    try:
        email_val = EmailAddressValidation.objects.get(validation_key=key)
    except EmailAddressValidation.DoesNotExist:
        messages.error(request, _('The email address you are trying to '
                                  'verify either has already been verified'
                                  ' or does not exist.'))
        return redirect('/')
    # Reuse an existing EmailAddress record when one exists.
    try:
        email = EmailAddress.objects.get(address=email_val.address)
    except EmailAddress.DoesNotExist:
        email = EmailAddress(address=email_val.address)
    if email.user and email.user.is_active:
        messages.error(request, _('The email address you are trying to '
                                  'verify is already an active email '
                                  'address.'))
        email_val.delete()
        return redirect('/')
    email.user = email_val.user
    email.save()
    email_val.delete()
    # Activate the (previously inactive) account owning this address.
    user = User.objects.get(username=email.user.username)
    user.is_active = True
    user.save()
    messages.success(request, _('Email address verified!'))
    return redirect('user_profile', username=email_val.user.username)
<SYSTEM_TASK:>
Remove an email address, validated or not.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, request, key):
    """Remove an email address, validated or not.

    Expects `email` and `user` in the urlencoded request body.  A pending
    validation entry is deleted outright; a confirmed address is kept but
    detached from the user.  Responds 204 on success, 400 when no email
    is given, 404 when the address is unknown.
    """
    request.DELETE = http.QueryDict(request.body)
    email_addr = request.DELETE.get('email')
    user_id = request.DELETE.get('user')
    if not email_addr:
        return http.HttpResponseBadRequest()
    # Pending (unvalidated) addresses are removed entirely.
    try:
        email = EmailAddressValidation.objects.get(address=email_addr,
                                                   user_id=user_id)
    except EmailAddressValidation.DoesNotExist:
        pass
    else:
        email.delete()
        return http.HttpResponse(status=204)
    # Validated addresses are only detached from the user.
    try:
        email = EmailAddress.objects.get(address=email_addr,
                                         user_id=user_id)
    except EmailAddress.DoesNotExist:
        raise http.Http404
    email.user = None
    email.save()
    return http.HttpResponse(status=204)
<SYSTEM_TASK:>
Set an email address as primary address.
<END_TASK>
<USER_TASK:>
Description:
def update(self, request, key):
    """Set an email address as primary address.

    Expects `email` and `user` in the urlencoded request body.  Responds
    204 on success, 400 when no email is given, 404 when the address does
    not belong to the user.
    """
    request.UPDATE = http.QueryDict(request.body)
    email_addr = request.UPDATE.get('email')
    user_id = request.UPDATE.get('user')
    if not email_addr:
        return http.HttpResponseBadRequest()
    try:
        email = EmailAddress.objects.get(address=email_addr,
                                         user_id=user_id)
    except EmailAddress.DoesNotExist:
        raise http.Http404
    # The primary address is the one stored on the user model itself.
    email.user.email = email_addr
    email.user.save()
    return http.HttpResponse(status=204)
<SYSTEM_TASK:>
Get the environment setting or return exception
<END_TASK>
<USER_TASK:>
Description:
def get_env_setting(setting):
    """Return the environment variable `setting` or raise
    ImproperlyConfigured when it is not set."""
    value = os.environ.get(setting)
    if value is None:
        raise ImproperlyConfigured("Set the %s env variable" % setting)
    return value
<SYSTEM_TASK:>
Verifies if a social account is valid.
<END_TASK>
<USER_TASK:>
Description:
def validate_social_account(account, url):
    """Verifies if a social account is valid via a HEAD request.

    Examples:
        >>> validate_social_account('seocam', 'http://twitter.com')
        True
        >>> validate_social_account('seocam-fake-should-fail',
                                    'http://twitter.com')
        False
    """
    head_request = urllib2.Request(urlparse.urljoin(url, account))
    head_request.get_method = lambda: 'HEAD'
    try:
        return urllib2.urlopen(head_request).code == 200
    except urllib2.HTTPError:
        # A 4xx/5xx answer means the account page does not exist.
        return False
<SYSTEM_TASK:>
Iterator yielding unprefixed events.
<END_TASK>
<USER_TASK:>
Description:
def basic_parse(response, buf_size=ijson.backend.BUFSIZE):
    """
    Iterator yielding unprefixed ijson events.

    Parameters:
        - response: a stream response from requests
    """
    lexer = iter(IncrementalJsonParser.lexer(response, buf_size))
    for event in ijson.backend.parse_value(lexer):
        yield event
    # The parsed value must consume the whole stream; any leftover
    # token means trailing garbage.
    try:
        next(lexer)
    except StopIteration:
        return
    raise ijson.common.JSONError('Additional data')
<SYSTEM_TASK:>
Force server to close current client subscription connection to the server
<END_TASK>
<USER_TASK:>
Description:
def drop_connection(self, name, database=None):
    """
    Force server to close current client subscription connection to the server

    @param str name: The name of the subscription
    @param str database: The name of the database
    """
    executor = self._store.get_request_executor(database)
    executor.execute(DropSubscriptionConnectionCommand(name))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.