code | docs
---|---
def connectShell(connection, protocol):
deferred = connectSession(connection, protocol)
@deferred.addCallback
    def requestShell(session):
        return session.requestShell()
    return deferred | Connect a Protocol to an SSH shell session |
def connectSubsystem(connection, protocol, subsystem):
deferred = connectSession(connection, protocol)
@deferred.addCallback
def requestSubsystem(session):
return session.requestSubsystem(subsystem)
    return deferred | Connect a Protocol to an SSH subsystem channel |
def connectSession(connection, protocol, sessionFactory=None, *args, **kwargs):
factory = sessionFactory or defaultSessionFactory
session = factory(*args, **kwargs)
session.dataReceived = protocol.dataReceived
session.closed = lambda: protocol.connectionLost(connectionDone)
deferred = defer.Deferred()
@deferred.addCallback
def connectProtocolAndReturnSession(specificData):
protocol.makeConnection(session)
return session
session.sessionOpen = deferred.callback
session.openFailed = deferred.errback
connection.openChannel(session)
    return deferred | Open an SSHSession channel and connect a Protocol to it
@param connection: the SSH Connection to open the session channel on
@param protocol: the Protocol instance to connect to the session
@param sessionFactory: factory method to generate a SSHSession instance
@note: :args: and :kwargs: are passed to the sessionFactory |
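A minimal usage sketch of the helpers above, assuming an already-established twisted.conch SSHConnection in `connection`; the Protocol subclass and callback are illustrative:

from twisted.internet import protocol

class ShellPrinter(protocol.Protocol):
    # Illustrative Protocol: print whatever the remote shell sends back.
    def dataReceived(self, data):
        print(data)

# `connection` is assumed to be a ready twisted.conch.ssh.connection.SSHConnection
deferred = connectShell(connection, ShellPrinter())
deferred.addErrback(lambda failure: print('shell request failed:', failure))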
def defaultSessionFactory(env={}, usePTY=False, *args, **kwargs):
    return SSHSession(env, usePTY, *args, **kwargs) | Create a default SSHSession instance; the factory used by connectSession when none is given |
def requestExec(self, commandLine):
data = common.NS(commandLine)
return self.sendRequest('exec', data, wantReply=True) | Request execution of :commandLine: and return a deferred reply. |
def requestSubsystem(self, subsystem):
data = common.NS(subsystem)
return self.sendRequest('subsystem', data, wantReply=True) | Request a subsystem and return a deferred reply. |
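For reference, `common.NS` above is Twisted's SSH string packer; a minimal sketch of the wire format it produces (a 4-byte big-endian length prefix followed by the bytes):

import struct

def ns(s):
    # Length-prefixed SSH string, as used for the 'exec'/'subsystem' payloads.
    if isinstance(s, str):
        s = s.encode('utf-8')
    return struct.pack('>L', len(s)) + s

assert ns('sftp') == b'\x00\x00\x00\x04sftp'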
def requestPty(self, term=None, rows=0, cols=0, xpixel=0, ypixel=0, modes=''):
#TODO: Needs testing!
term = term or os.environ.get('TERM', '')
data = packRequest_pty_req(term, (rows, cols, xpixel, ypixel), modes)
return self.sendRequest('pty-req', data) | Request allocation of a pseudo-terminal for a channel
@param term: TERM environment variable value (e.g., vt100)
@param rows: terminal height, rows (e.g., 24)
@param cols: terminal width, characters (e.g., 80)
@param xpixel: terminal width, pixels (e.g., 640)
@param ypixel: terminal height, pixels (e.g., 480)
@param modes: encoded terminal modes
The dimension parameters are only informational.
Zero dimension parameters are ignored. The columns/rows dimensions
override the pixel dimensions (when nonzero). Pixel dimensions refer
to the drawable area of the window. |
def requestEnv(self, env={}):
for variable, value in env.items():
data = common.NS(variable) + common.NS(value)
self.sendRequest('env', data) | Send requests to set the environment variables for the channel |
def commandstr(command):
if command == CMD_MESSAGE_ERROR:
msg = "CMD_MESSAGE_ERROR"
elif command == CMD_MESSAGE_LIST:
msg = "CMD_MESSAGE_LIST"
elif command == CMD_MESSAGE_PASSWORD:
msg = "CMD_MESSAGE_PASSWORD"
elif command == CMD_MESSAGE_MP3:
msg = "CMD_MESSAGE_MP3"
elif command == CMD_MESSAGE_DELETE:
msg = "CMD_MESSAGE_DELETE"
elif command == CMD_MESSAGE_VERSION:
msg = "CMD_MESSAGE_VERSION"
elif command == CMD_MESSAGE_CDR_AVAILABLE:
msg = "CMD_MESSAGE_CDR_AVAILABLE"
elif command == CMD_MESSAGE_CDR:
msg = "CMD_MESSAGE_CDR"
else:
msg = "CMD_MESSAGE_UNKNOWN"
return msg | Convert command into string. |
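The same lookup can be written table-driven; a behavior-equivalent sketch, assuming the CMD_MESSAGE_* constants are hashable:

_COMMAND_NAMES = {
    CMD_MESSAGE_ERROR: "CMD_MESSAGE_ERROR",
    CMD_MESSAGE_LIST: "CMD_MESSAGE_LIST",
    CMD_MESSAGE_PASSWORD: "CMD_MESSAGE_PASSWORD",
    CMD_MESSAGE_MP3: "CMD_MESSAGE_MP3",
    CMD_MESSAGE_DELETE: "CMD_MESSAGE_DELETE",
    CMD_MESSAGE_VERSION: "CMD_MESSAGE_VERSION",
    CMD_MESSAGE_CDR_AVAILABLE: "CMD_MESSAGE_CDR_AVAILABLE",
    CMD_MESSAGE_CDR: "CMD_MESSAGE_CDR",
}

def commandstr_table(command):
    # Fall back to the same unknown marker as the if/elif chain above.
    return _COMMAND_NAMES.get(command, "CMD_MESSAGE_UNKNOWN")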
def run():
parser = OptionParser(
version=__version__, description=__doc__,
)
parser.add_option(
'-u', '--url', dest='url',
help='Database URL (connection string)',
)
parser.add_option(
'-r', '--render', dest='render', default='dot',
choices=['plantuml', 'dot'],
help='Output format - plantuml or dot',
)
parser.add_option(
'-l', '--list', dest='list', action='store_true',
help='Output database list of tables and exit',
)
parser.add_option(
'-i', '--include', dest='include',
        help='Comma-separated list of tables to include',
)
parser.add_option(
'-e', '--exclude', dest='exclude',
        help='Comma-separated list of tables to exclude',
)
(options, args) = parser.parse_args()
if not options.url:
print('-u/--url option required')
exit(1)
engine = create_engine(options.url)
meta = MetaData()
meta.reflect(bind=engine)
if options.list:
print('Database tables:')
tables = sorted(meta.tables.keys())
        def _g(l, i):
            try:
                return l[i]
            except IndexError:
                return ''
for i in range(0, len(tables), 2):
print(' {0}{1}{2}'.format(
_g(tables, i),
' ' * (38 - len(_g(tables, i))),
_g(tables, i + 1),
))
exit(0)
tables = set(meta.tables.keys())
if options.include:
        tables &= set(map(str.strip, options.include.split(',')))
if options.exclude:
        tables -= set(map(str.strip, options.exclude.split(',')))
desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
print(getattr(render, options.render)(desc)) | Command for reflection database objects |
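A self-contained sketch of the reflection step at the heart of run(), runnable against an in-memory SQLite database (the table name is illustrative):

from sqlalchemy import create_engine, MetaData, text

engine = create_engine('sqlite://')
with engine.begin() as conn:
    conn.execute(text('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)'))

meta = MetaData()
meta.reflect(bind=engine)  # populates meta.tables from the live database
print(sorted(meta.tables.keys()))  # ['users']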
def get_poll(poll_id):
return StrawPoll(requests.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id))) | Get a strawpoll.
Example:
poll = strawpy.get_poll('11682852')
:param poll_id:
:return: strawpy.Strawpoll object |
def create_poll(title, options, multi=True, permissive=True, captcha=False, dupcheck='normal'):
query = {
'title': title,
'options': options,
'multi': multi,
'permissive': permissive,
'captcha': captcha,
'dupcheck': dupcheck
}
return StrawPoll(requests.post('http://strawpoll.me/api/v2/polls', data=json.dumps(query))) | Create a strawpoll.
Example:
new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No'])
:param title:
:param options:
:param multi:
:param permissive:
:param captcha:
:param dupcheck:
:return: strawpy.Strawpoll object |
def raise_status(response):
if response.status_code != 200:
if response.status_code == 401:
raise StrawPollException('Unauthorized', response)
elif response.status_code == 403:
raise StrawPollException('Forbidden', response)
elif response.status_code == 404:
raise StrawPollException('Not Found', response)
else:
response.raise_for_status() | Raise an exception if the request did not return a status code of 200.
:param response: Request response body |
def refresh(self):
strawpoll_response = requests.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=self.id))
raise_status(strawpoll_response)
self.status_code = strawpoll_response.status_code
self.response_json = strawpoll_response.json()
self.id = self.response_json['id']
self.title = self.response_json['title']
self.options = self.response_json['options']
self.votes = self.response_json['votes']
self.captcha = self.response_json['captcha']
self.dupcheck = self.response_json['dupcheck']
self.url = 'https://www.strawpoll.me/{id}'.format(id=self.id)
self.results_url = 'https://www.strawpoll.me/{id}/r'.format(id=self.id) | Refresh all class attributes. |
def write_json_file(self, path):
with open(path, "w") as f:
f.write(self.to_json()) | Serialize this VariantCollection to a JSON representation and write it
out to a text file. |
def read_json_file(cls, path):
with open(path, 'r') as f:
json_string = f.read()
return cls.from_json(json_string) | Construct a VariantCollection from a JSON file. |
def dumps(data, escape=False, **kwargs):
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = True
converted = json.dumps(data, default=_converter, **kwargs)
if escape:
# We're escaping the whole dumped string here cause there's no (easy)
# way to hook into the native json library and change how they process
# values like strings, None objects and some other "literal" stuff.
#
# Also, we're not escaping quotes here cause they're escaped by the
# native json library already. So, we just escape basic html entities,
        # like <, > and &.
return cgi.escape(converted)
    return converted | A wrapper around `json.dumps` that can handle objects the json
module is not aware of.
This function is aware of a list of custom serializers that can be
registered by the API user, making it possible to convert any kind
of object to types that the json library can handle. |
def deserialize(klass, data):
handler = DESERIALIZE_REGISTRY.get(klass)
if handler:
return handler(data)
raise TypeError("There is no deserializer registered to handle "
"instances of '{}'".format(klass.__name__)) | Helper function to access a method that creates objects of a
given `klass` with the received `data`. |
def _convert_from(data):
try:
module, klass_name = data['__class__'].rsplit('.', 1)
klass = getattr(import_module(module), klass_name)
except (ImportError, AttributeError, KeyError):
# But I still haven't found what I'm looking for
#
        # Waiting for three different exceptions here. KeyError will
        # raise if we can't find the "__class__" entry in the json `data`
# dictionary. ImportError happens when the module present in the
# dotted name can't be resolved. Finally, the AttributeError
# happens when we can find the module, but couldn't find the
# class on it.
return data
return deserialize(klass, data['__value__']) | Internal function that will be hooked to the native `json.loads`
Find the right deserializer for a given value, taking into account
the internal deserializer registry. |
def _converter(data):
handler = REGISTRY.get(data.__class__)
if handler:
full_name = '{}.{}'.format(
data.__class__.__module__,
data.__class__.__name__)
return {
'__class__': full_name,
'__value__': handler(data),
}
raise TypeError(repr(data) + " is not JSON serializable") | Internal function that will be passed to the native `json.dumps`.
This function uses the `REGISTRY` of serializers and try to convert
a given instance to an object that json.dumps can understand. |
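Putting the registry pieces together: a hedged sketch of how REGISTRY and DESERIALIZE_REGISTRY are presumably populated so that `dumps` and `_convert_from` can round-trip a custom type (the datetime handlers are illustrative):

from datetime import datetime

# Encoder consulted by json.dumps(default=_converter) ...
REGISTRY[datetime] = lambda value: value.isoformat()
# ... and the matching decoder consulted by deserialize()/_convert_from().
DESERIALIZE_REGISTRY[datetime] = lambda data: datetime.fromisoformat(data)

payload = dumps({'when': datetime(2020, 1, 1)})
# -> {"when": {"__class__": "datetime.datetime", "__value__": "2020-01-01T00:00:00"}}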
def start(self):
logging.info('ResponseBot started')
handler_classes = handler_utils.discover_handler_classes(self.config.get('handlers_package'))
if len(handler_classes) == 0:
logging.warning('No handler found. Did you forget to extend BaseTweethandler? Check --handlers-module')
while True:
try:
client = auth_utils.auth(self.config)
listener = ResponseBotListener(client=client, handler_classes=handler_classes)
stream = ResponseBotStream(client=client, listener=listener)
stream.start()
except (APIQuotaError, AuthenticationError, TweepError) as e:
self.handle_error(e)
else:
break | Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
:func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.) |
def handle_error(self, error):
logging.exception("try to sleep if there are repeating errors.")
error_desc = str(error)
now = datetime.datetime.now()
if error_desc not in self.error_time_log:
self.error_time_log[error_desc] = now
return
time_of_last_encounter = self.error_time_log[str(error)]
time_since_last_encounter = now - time_of_last_encounter
if time_since_last_encounter.total_seconds() > self.config.get('min_seconds_between_errors'):
self.error_time_log[error_desc] = now
return
if error_desc not in self.error_sleep_log:
time.sleep(self.config.get('sleep_seconds_on_consecutive_errors'))
self.error_sleep_log[error_desc] = 1
else:
sys.exit() | Try to detect repetitive errors and sleep for a while to avoid being marked as spam |
def parse_isodate(datestr):
m = isodate_rx.search(datestr)
assert m, 'unrecognized date format: ' + datestr
year, month, day = m.group('year', 'month', 'day')
hour, minute, second, fraction = m.group('hour', 'minute', 'second', 'fraction')
tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm')
dt = datetime.datetime(int(year), int(month), int(day), int(hour))
if fraction is None:
fraction = 0
else:
fraction = float('0.' + fraction)
if minute is None:
dt = dt.replace(minute=int(60 * fraction))
else:
dt = dt.replace(minute=int(minute))
if second is None:
dt = dt.replace(second=int(60 * fraction))
else:
dt = dt.replace(second=int(second), microsecond=int(1000000 * fraction))
if tz is not None:
if tz[0] == 'Z':
offset = 0
else:
offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh))
if tz[0] == '-':
offset = -offset
dt = dt.replace(tzinfo=UTCOffset(offset))
    return dt | Parse a string that loosely fits the ISO 8601 date-time format |
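Illustrative behavior of the fraction handling above (assuming isodate_rx accepts these forms): a fraction on the minutes becomes seconds, and a fraction on the seconds becomes microseconds:

parse_isodate('2014-06-17T12:30.5')      # -> datetime(2014, 6, 17, 12, 30, 30)
parse_isodate('2014-06-17T12:30:15.25')  # -> datetime(2014, 6, 17, 12, 30, 15, 250000)
parse_isodate('2014-06-17T12:30:15Z')    # -> timezone-aware, with a UTC offset of zero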
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
raise NotImplementedError | List directory or file
:param rev: The revision to use.
:param path: The path to list. May start with a '/' or not. Directories
may end with a '/' or not.
:param recursive: Recursively list files in subdirectories.
:param recursive_dirs: Used when recursive=True, also list directories.
:param directory: If path is a directory, list path itself instead of
its contents.
:param report: A list or tuple of extra attributes to return that may
require extra processing. Recognized values are 'size',
'target', 'executable', and 'commit'.
Returns a list of dictionaries with the following keys:
**type**
The type of the file: 'f' for file, 'd' for directory, 'l' for
symlink.
**name**
The name of the file. Not present if directory=True.
**size**
The size of the file. Only present for files when 'size' is in
report.
**target**
The target of the symlink. Only present for symlinks when
'target' is in report.
**executable**
True if the file is executable, False otherwise. Only present
for files when 'executable' is in report.
Raises PathDoesNotExist if the path does not exist. |
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
raise NotImplementedError | Get commit logs
:param revrange: Either a single revision or a range of revisions as a
2-element list or tuple.
:param int limit: Limit the number of log entries.
:param bool firstparent: Only follow the first parent of merges.
:param bool merges: True means only merges, False means no merges,
None means both merges and non-merges.
:param str path: Only match commits containing changes on this path.
:param bool follow: Follow file history across renames.
:returns: log information
:rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry`
If revrange is None, return a list of all log entries in reverse
chronological order.
If revrange is a single revision, return a single log entry.
If revrange is a 2 element list [A,B] or tuple (A,B), return a list of log
entries starting at B and following that branch back to A or one of its
ancestors (not inclusive). If A is None, follow branch B back to the
beginning of history. If B is None, list all descendants in reverse
chronological order. |
def user_create(self, cloudflare_email, cloudflare_pass, unique_id=None):
params = {
'act': 'user_create',
'cloudflare_email': cloudflare_email,
'cloudflare_pass': cloudflare_pass
}
if unique_id:
params['unique_id'] = unique_id
return self._request(params) | Create new cloudflare user with selected email and id. Optionally also
select unique_id which can be then used to get user information.
:param cloudflare_email: new user cloudflare email
:type cloudflare_email: str
:param cloudflare_pass: new user cloudflare password
:type cloudflare_pass: str
:param unique_id: new user unique id
:type unique_id: str (optional)
:returns:
:rtype: dict |
def zone_set(self, user_key, zone_name, resolve_to, subdomains):
params = {
'act': 'zone_set',
'user_key': user_key,
'zone_name': zone_name,
'resolve_to': resolve_to,
'subdomains': subdomains,
}
return self._request(params) | Create new zone for user associated with this user_key.
:param user_key: The unique 3auth string, identifying the user's
CloudFlare Account. Generated from a user_create or user_auth
:type user_key: str
:param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. "example.com".
:type zone_name: str
:param resolve_to: The CNAME that CloudFlare should ultimately
resolve web connections to after they have been filtered
:type resolve_to: str
:param subdomains: A comma-separated string of subdomain(s) that
CloudFlare should host, e.g. "www,blog,forums"
:type subdomains: str
:returns:
:rtype: dict |
def full_zone_set(self, user_key, zone_name):
params = {
'act': 'full_zone_set',
'user_key': user_key,
'zone_name': zone_name,
}
return self._request(params) | Create new zone and all subdomains for user associated with this
user_key.
:param user_key: The unique 3auth string, identifying the user's
CloudFlare Account. Generated from a user_create or user_auth
:type user_key: str
:param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. "example.com".
:type zone_name: str
:returns:
:rtype: dict |
def user_lookup(self, cloudflare_email=None, unique_id=None):
if not cloudflare_email and not unique_id:
raise KeyError(
'Either cloudflare_email or unique_id must be present')
params = {'act': 'user_lookup'}
if cloudflare_email:
params['cloudflare_email'] = cloudflare_email
else:
params['unique_id'] = unique_id
return self._request(params) | Lookup user data based on either his cloudflare_email or his
unique_id.
:param cloudflare_email: email associated with user
:type cloudflare_email: str
:param unique_id: unique id associated with user
:type unique_id: str
:returns:
:rtype: dict |
def user_auth(
self,
cloudflare_email=None,
cloudflare_pass=None,
unique_id=None
):
if not (cloudflare_email and cloudflare_pass) and not unique_id:
raise KeyError(
'Either cloudflare_email and cloudflare_pass or unique_id must be present')
params = {'act': 'user_auth'}
if cloudflare_email and cloudflare_pass:
params['cloudflare_email'] = cloudflare_email
params['cloudflare_pass'] = cloudflare_pass
if unique_id:
params['unique_id'] = unique_id
return self._request(params) | Get user_key based on either his email and password or unique_id.
:param cloudflare_email: email associated with user
:type cloudflare_email: str
:param cloudflare_pass: pass associated with user
:type cloudflare_pass: str
:param unique_id: unique id associated with user
:type unique_id: str
:returns:
:rtype: dict |
def zone_list(
self,
user_key,
limit=100,
offset=0,
zone_name=None,
sub_id=None,
zone_status='ALL',
sub_status='ALL',
):
if zone_status not in ['V', 'D', 'ALL']:
raise ValueError('zone_status has to be V, D or ALL')
if sub_status not in ['V', 'CNL', 'ALL']:
raise ValueError('sub_status has to be V, CNL or ALL')
params = {
'act': 'zone_list',
'user_key': user_key,
'limit': limit,
'offset': offset,
'zone_status': zone_status,
'sub_status': sub_status
}
if zone_name:
params['zone_name'] = zone_name
if sub_id:
params['sub_id'] = sub_id
return self._request(params) | List zones for a user.
:param user_key: key for authentication of user
:type user_key: str
:param limit: limit of zones shown
:type limit: int
:param offset: offset of zones to be shown
:type offset: int
:param zone_name: name of zone to lookup
:type zone_name: str
:param sub_id: subscription id of reseller (only for use by resellers)
:type sub_id: str
:param zone_status: status of zones to be shown
:type zone_status: str (one of: V(active), D(deleted), ALL)
:param sub_status: status of subscription of zones to be shown
:type sub_status: str (one of: V(active), CNL(cancelled), ALL)
:returns:
:rtype: dict |
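A hedged usage sketch chaining the client methods above (the `client` instance, its constructor, and the response keys are hypothetical; actual payloads depend on the CloudFlare Host API):

client = CloudFlareHostClient(host_key='...')            # hypothetical constructor
user = client.user_create('user@example.com', 's3cret')
user_key = user['response']['user_key']                  # response path assumed
zones = client.zone_list(user_key, zone_status='V', limit=50)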
def attr_exists(self, attr):
gen = self.attr_gen(attr)
n_instances = len(list(gen))
    return n_instances > 0 | Returns True if at least one instance of the attribute is found |
def datasets(self):
HiisiHDF._clear_cache()
self.visititems(HiisiHDF._is_dataset)
return HiisiHDF.CACHE['dataset_paths'] | Method returns a list of dataset paths.
Examples
--------
>>> for dataset in h5f.datasets():
print(dataset)
'/dataset1/data1/data'
'/dataset1/data2/data'
'/dataset2/data1/data'
'/dataset2/data2/data' |
def groups(self):
HiisiHDF._clear_cache()
self.CACHE['group_paths'].append('/')
self.visititems(HiisiHDF._is_group)
    return HiisiHDF.CACHE['group_paths'] | Method returns a list of all group paths
Examples
--------
>>> for group in h5f.groups():
print(group)
'/'
'/dataset1'
'/dataset1/data1'
'/dataset1/data2' |
def attr_gen(self, attr):
HiisiHDF._clear_cache()
HiisiHDF.CACHE['search_attribute'] = attr
HiisiHDF._find_attr_paths('/', self['/']) # Check root attributes
self.visititems(HiisiHDF._find_attr_paths)
path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr)) for attr_path in HiisiHDF.CACHE['attribute_paths'])
return path_attr_gen | Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5 |
def create_from_filedict(self, filedict):
    if self.mode in ['r+', 'w', 'w-', 'x', 'a']:
        for h5path, path_content in filedict.items():
            if 'DATASET' in path_content:
                # If the path exists, write only the metadata
                if h5path in self:
                    for key, value in path_content.items():
                        if key != 'DATASET':
                            self[h5path].attrs[key] = value
                else:
                    try:
                        group = self.create_group(os.path.dirname(h5path))
                    except ValueError:
                        group = self[os.path.dirname(h5path)]
                    new_dataset = group.create_dataset(os.path.basename(h5path), data=path_content['DATASET'])
                    for key, value in path_content.items():
                        if key != 'DATASET':
                            new_dataset.attrs[key] = value
            else:
                try:
                    group = self.create_group(h5path)
                except ValueError:
                    group = self[h5path]
                for key, value in path_content.items():
                    group.attrs[key] = value | Creates an h5 file from a dictionary describing the file structure.
Filedict is a regular dictionary whose keys are hdf5 paths and whose
values are dictionaries containing the metadata and datasets. Metadata
is given as normal key-value pairs and dataset arrays are given using
the 'DATASET' key. Datasets must be numpy arrays.
The method can also be used to append to an existing hdf5 file. If the
file is opened in read-only mode, the method does nothing.
Examples
--------
Create newfile.h5 and fill it with data and metadata
>>> h5f = HiisiHDF('newfile.h5', 'w')
>>> filedict = {'/':{'attr1':'A'},
'/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'}
>>> h5f.create_from_filedict(filedict) |
def search(self, attr, value, tolerance=0):
found_paths = []
gen = self.attr_gen(attr)
for path_attr_pair in gen:
# if attribute is numerical use numerical_value_tolerance in
# value comparison. If attribute is string require exact match
if isinstance(path_attr_pair.value, str):
type_name = 'str'
else:
type_name = path_attr_pair.value.dtype.name
if 'int' in type_name or 'float' in type_name:
if abs(path_attr_pair.value - value) <= tolerance:
found_paths.append(path_attr_pair.path)
else:
if path_attr_pair.value == value:
found_paths.append(path_attr_pair.path)
return found_paths | Find paths with a key value match
Parameters
----------
attr : str
name of the attribute
value : str or numerical value
value of the searched attribute
Keywords
--------
tolerance : float
tolerance used when searching for matching numerical
attributes. If the value of the attribute found from the file
differs from the searched value less than the tolerance, attributes
are considered to be the same.
Returns
-------
results : list
a list of all matching paths
Examples
--------
>>> for result in h5f.search('elangle', 0.5, 0.1):
print(result)
'/dataset1/where'
>>> for result in h5f.search('quantity', 'DBZH'):
print(result)
'/dataset1/data2/what'
'/dataset2/data2/what'
'/dataset3/data2/what'
'/dataset4/data2/what'
'/dataset5/data2/what' |
def _correctIsotopeImpurities(matrix, intensities):
correctedIntensities, _ = scipy.optimize.nnls(matrix, intensities)
return correctedIntensities | Corrects observed reporter ion intensities for isotope impurities.
:params matrix: a matrix (2d nested list) containing numbers, each isobaric
channel must be present as a COLUMN. Use maspy.isobar._transposeMatrix()
if channels are written in rows.
:param intensities: numpy array of observed reporter ion intensities.
:returns: a numpy array of reporter ion intensities corrected for isotope
impurities. |
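A self-contained sketch of the NNLS step above with a toy two-channel impurity matrix (the numbers are illustrative, not real isotope impurities):

import numpy as np
import scipy.optimize

# Channels as columns: column j describes where channel j's signal ends up.
matrix = np.array([[0.95, 0.03],
                   [0.05, 0.97]])
observed = np.array([100.0, 50.0])
corrected, residual = scipy.optimize.nnls(matrix, observed)
# `corrected` holds the non-negative intensities that best explain `observed`.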
def _normalizeImpurityMatrix(matrix):
newMatrix = list()
for line in matrix:
total = sum(line)
if total != 0:
newMatrix.append([i / total for i in line])
else:
newMatrix.append(line)
    return newMatrix | Normalize each row of the matrix so that the sum of the row equals 1.
:params matrix: a matrix (2d nested list) containing numbers, each isobaric
channel must be present as a row.
:returns: a matrix containing normalized values |
def _padImpurityMatrix(matrix, preChannels, postChannels):
extendedMatrix = list()
lastMatrixI = len(matrix)-1
for i, line in enumerate(matrix):
prePadding = itertools.repeat(0., i)
postPadding = itertools.repeat(0., lastMatrixI-i)
newLine = list(itertools.chain(prePadding, line, postPadding))
        extendedMatrix.append(newLine[preChannels:len(newLine) - postChannels])
return extendedMatrix | Align the values of an isotope impurity matrix and fill up with 0.
NOTE:
The length of the rows in the "matrix" must be the sum of "preChannels"
and "postChannels" + 1.
:params matrix: a matrix (2d nested list) containing numbers, each isobaric
channel must be present as a row.
:params preChannels: number of matrix columns with a nominal mass shift < 0
(-1, -2,..) in respect to the reporter ion mz value.
:params postChannels: number of matrix columns with a nominal mass shift > 0
(+1, +2,..) in respect to the reporter ion mz value.
:returns: extended matrix, where the number of rows is unchanged but the
length of each row is extended to the number of rows. |
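A worked example of the padding above: a 3-row matrix with one pre- and one post-channel (row length 1 + 1 + 1 = 3) is shifted row by row and trimmed back to square:

matrix = [[0.1, 0.8, 0.1],
          [0.2, 0.7, 0.1],
          [0.0, 0.9, 0.1]]
print(_padImpurityMatrix(matrix, preChannels=1, postChannels=1))
# [[0.8, 0.1, 0.0],
#  [0.2, 0.7, 0.1],
#  [0.0, 0.0, 0.9]]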
def _processImpurityMatrix(self):
processedMatrix = _normalizeImpurityMatrix(self.impurityMatrix)
processedMatrix = _padImpurityMatrix(
processedMatrix, self.matrixPreChannels, self.matrixPostChannels
)
processedMatrix = _transposeMatrix(processedMatrix)
return processedMatrix | Process the impurity matrix so that it can be used to correct
observed reporter intensities. |
def exception(message):
def decorator(method):
"""Inner decorator so we can accept arguments."""
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Innermost decorator wrapper - this is confusing."""
if self.messages:
kwargs['message'] = args[0] if args else kwargs.get('message', message)
else:
kwargs['message'] = None
kwargs['prefix'] = self.prefix
kwargs['statsd'] = self.statsd
return method(self, **kwargs)
return wrapper
return decorator | Exception method convenience wrapper. |
def to_dict(self):
val = dict(self.payload or ())
if self.message:
val['message'] = self.message
return val | Convert Exception class to a Python dictionary. |
def init_app(self, app, config=None, statsd=None):
if config is not None:
self.config = config
elif self.config is None:
self.config = app.config
self.messages = self.config.get('EXCEPTION_MESSAGE', True)
self.prefix = self.config.get('EXCEPTION_PREFIX', DEFAULT_PREFIX)
self.statsd = statsd | Init Flask Extension. |
def program(self):
statements = []
if self.cur_token.type == TokenTypes.NEW_LINE:
self.eat(TokenTypes.NEW_LINE)
while self.cur_token.type != TokenTypes.EOF:
statements += [self.statement()]
return Block(statements) | program : (newline) statement
| program statement |
def statement(self):
if self.cur_token.type == TokenTypes.VAR:
self.tokenizer.start_saving(self.cur_token)
self.variable()
peek_var = self.cur_token
self.tokenizer.replay()
self.eat()
if peek_var.type == TokenTypes.ASSIGN:
return self.assign_statement()
else:
return self.expression()
elif self.cur_token.type in TokenTypes.control(self.features):
return self.control()
elif self.cur_token.type in TokenTypes.loop(self.features):
return self.loop()
elif self.cur_token.type in TokenTypes.func(self.features):
if self.cur_token.type == TokenTypes.FUNC:
return self.func()
elif self.cur_token.type == TokenTypes.RETURN:
return self.return_statement()
self.error("Invalid token or unfinished statement") | statement : assign_statement
| expression
| control
| empty
Feature For Loop adds:
| loop
Feature Func adds:
| func
| return statement |
def assign_statement(self):
left = self.variable()
op = self.cur_token
self.eat(TokenTypes.ASSIGN)
right = self.expression()
smt = None
if Features.TYPE_ARRAY in self.features and isinstance(left, GetArrayItem):
# Remake this as a setitem.
smt = SetArrayItem(left.left, left.right, right)
else:
smt = Assign(op, left, right)
if self.cur_token.type == TokenTypes.SEMI_COLON:
self.eat(TokenTypes.SEMI_COLON)
return smt | assign smt : variable ASSIGN expression(;)
Feature Type Array adds:
| variable SETITEM expression(;) |
def control(self):
self.eat(TokenTypes.IF)
ctrl = self.expression()
block = self.block()
ifs = [If(ctrl, block)]
else_block = Block()
while self.cur_token.type == TokenTypes.ELIF:
self.eat(TokenTypes.ELIF)
ctrl = self.expression()
block = self.block()
ifs.append(If(ctrl, block))
if self.cur_token.type == TokenTypes.ELSE:
self.eat(TokenTypes.ELSE)
else_block = self.block()
return ControlBlock(ifs, else_block) | control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block) |
def loop(self):
self.eat(TokenTypes.FOR_LOOP)
init = NoOp()
if self.cur_token.type != TokenTypes.SEMI_COLON:
init = self.assign_statement()
else:
self.eat(TokenTypes.SEMI_COLON)
ctrl = NoOp()
if self.cur_token.type != TokenTypes.SEMI_COLON:
ctrl = self.expression()
self.eat(TokenTypes.SEMI_COLON)
inc = NoOp()
if self.cur_token.type != TokenTypes.LBRACE:
inc = self.assign_statement()
block = self.block()
return ForLoop(init, ctrl, inc, block) | loop : 'for' init; ctrl; inc block |
def func(self):
self.eat(TokenTypes.FUNC)
name = Var(self.cur_token)
self.eat(TokenTypes.VAR)
self.eat(TokenTypes.LPAREN)
sig = self.param_list()
self.eat(TokenTypes.RPAREN)
block = self.block()
return FunctionDef(name, Function(sig, block)) | func : func name(paramlist) block |
def param_list(self):
params = []
while self.cur_token.type == TokenTypes.VAR:
params.append(Var(self.cur_token))
self.eat(TokenTypes.VAR)
if self.cur_token.type == TokenTypes.COMMA:
self.eat(TokenTypes.COMMA)
return FunctionSig(params) | paramlist : var, paramlist
paramlist : var
paramlist : |
def arg_list(self, ending_char=TokenTypes.RPAREN):
args = []
while not self.cur_token.type == ending_char:
args.append(self.expression())
if self.cur_token.type == TokenTypes.COMMA:
self.eat(TokenTypes.COMMA)
return args | arglist : expression, arglist
arglist : expression
arglist : |
def array_const(self):
self.eat(TokenTypes.LBRACKET)
node = Array(self.arg_list(TokenTypes.RBRACKET))
self.eat(TokenTypes.RBRACKET)
return node | Feature Type Array adds:
array : [ arglist ] |
def block(self):
statements = []
self.eat(TokenTypes.LBRACE)
if self.cur_token.type == TokenTypes.NEW_LINE:
self.eat(TokenTypes.NEW_LINE)
while self.cur_token.type != TokenTypes.RBRACE:
statements.append(self.statement())
self.eat(TokenTypes.RBRACE)
if self.cur_token.type == TokenTypes.NEW_LINE:
self.eat(TokenTypes.NEW_LINE)
return Block(statements) | block : { (newline) statements } (newline) |
def variable(self):
var = Var(self.cur_token)
self.eat(TokenTypes.VAR)
if Features.TYPE_ARRAY in self.features:
while self.cur_token.type == TokenTypes.LBRACKET:
self.eat(TokenTypes.LBRACKET)
            # Start past the logical ops.
expr = self.operator_expression(level=2)
self.eat(TokenTypes.RBRACKET)
var = GetArrayItem(left=var, right=expr)
if Features.FUNC in self.features:
if self.cur_token.type == TokenTypes.LPAREN:
self.eat(TokenTypes.LPAREN)
args = self.arg_list()
self.eat(TokenTypes.RPAREN)
var = Call(var, args)
return var | variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list) |
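Illustrative inputs matched by the variable rule above, with the node each one produces (surface syntax inferred from the grammar comments):

# x           -> Var
# x[i + 1]    -> GetArrayItem        (Feature Type Array)
# f(a, b)     -> Call                (Feature Func)
# m[0](y)     -> Call wrapping a GetArrayItem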
def wrap_node(self, node, options):
    '''\
    celery registers tasks by decorating them, and so do we, so the user
    can pass a celery task and we'll wrap our code with theirs in a nice
    package celery can execute.
    '''
    if 'celery_task' in options:
        return options['celery_task'](node)
    return self.celery_task(node) | \
celery registers tasks by decorating them, and so do we, so the user
can pass a celery task and we'll wrap our code with theirs in a nice
package celery can execute. |
def bfs(graph, start):
# maintain a queue of paths
queue = []
visited = []
# maintain a queue of nodes
# push the first path into the queue
queue.append([['', start]])
while queue:
# get the first path from the queue
path = queue.pop(0)
# get the last node from the path
node = path[-1][1]
if node.stateid not in visited:
visited.append(node.stateid)
# path found
if node.final != TropicalWeight(float('inf')):
return "".join([mnode[0] for mnode in path])
# enumerate all adjacent nodes, construct a new path and push
# it into the queue
for arc in node.arcs:
char = graph.isyms.find(arc.ilabel)
next_state = graph[arc.nextstate]
# print next_state.stateid
if next_state.stateid not in visited:
new_path = list(path)
new_path.append([char, next_state])
queue.append(new_path) | Finds the shortest string using BFS
Args:
graph (DFA): The DFA states
start (DFA state): The DFA initial state
Returns:
str: The shortest string |
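The same idea on a plain adjacency structure, for illustration (the real function walks an FST's arcs and symbol table instead):

from collections import deque

def shortest_string(transitions, start, finals):
    """transitions: {state: [(char, next_state), ...]}; finals: set of states."""
    queue = deque([('', start)])
    visited = {start}
    while queue:
        prefix, state = queue.popleft()
        if state in finals:
            return prefix
        for char, nxt in transitions.get(state, []):
            if nxt not in visited:
                visited.add(nxt)
                queue.append((prefix + char, nxt))
    return None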
def run():
# We override the program name to reflect that this script must be run with
# the python executable.
parser = argparse.ArgumentParser(
prog='python -m braillegraph',
description='Print a braille bar graph of the given integers.'
)
# This flag sets the end string that we'll print. If we pass end=None to
# print(), it will use its default. If we pass end='', it will suppress the
# newline character.
parser.add_argument('-n', '--no-newline', action='store_const',
dest='end', const='', default=None,
help='do not print the trailing newline character')
# Add subparsers for the directions
subparsers = parser.add_subparsers(title='directions')
horizontal_parser = subparsers.add_parser('horizontal',
help='a horizontal graph')
horizontal_parser.set_defaults(
func=lambda args: horizontal_graph(args.integers)
)
horizontal_parser.add_argument('integers', metavar='N', type=int,
nargs='+', help='an integer')
vertical_parser = subparsers.add_parser('vertical',
help='a vertical graph')
vertical_parser.set_defaults(
func=lambda args: vertical_graph(args.integers, sep=args.sep)
)
vertical_parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer')
# The separator for groups of bars (i.e., "lines"). If we pass None,
# vertical_parser will use its default.
vertical_parser.add_argument('-s', '--sep', action='store', default=None,
help='separator for groups of bars')
args = parser.parse_args()
print(args.func(args), end=args.end) | Display the arguments as a braille graph on standard output. |
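Example invocations of the CLI above (the program name comes from the prog= argument to ArgumentParser; note that -n is a top-level flag and goes before the direction):

python -m braillegraph horizontal 1 2 3 4
python -m braillegraph vertical 1 2 3 4 --sep ' '
python -m braillegraph -n horizontal 5 10 15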
def _rnd_date(start, end):
return date.fromordinal(random.randint(start.toordinal(), end.toordinal())) | Internal random date generator. |
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _rnd_date(start, end) | Generate a random date between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**Chinese documentation**
Randomly generates a date between ``start`` and ``end``. |
def rnd_date_array(size, start=date(1970, 1, 1), end=None, **kwargs):
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_date, start, end) | Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date |
def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
if end is None:
end = date.today()
start_days = to_ordinal(parser.parse_datetime(start))
end_days = to_ordinal(parser.parse_datetime(end))
_assert_correct_start_end(start_days, end_days)
if has_np: # pragma: no cover
return [
from_ordinal(days)
for days in np.random.randint(start_days, end_days, size)
]
else:
return [
from_ordinal(random.randint(start_days, end_days))
for _ in range(size)
] | Generate mass random date.
:param size: int, number of
:param start: date similar object, int / str / date / datetime
:param end: date similar object, int / str / date / datetime, default today's date
:param kwargs: args placeholder
:return: list of datetime.date |
def rnd_datetime(start=datetime(1970, 1, 1), end=None):
    if end is None:
        end = datetime.now()
    start = parser.parse_datetime(start)
    end = parser.parse_datetime(end)
_assert_correct_start_end(start, end)
return _rnd_datetime(start, end) | Generate a random datetime between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.datetime, (default datetime(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.datetime, (default datetime.now())
:return: a datetime.datetime object
**Chinese documentation**
Randomly generates a datetime between ``start`` and ``end``. |
def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None):
if end is None:
end = datetime.now()
start = parser.parse_datetime(start)
end = parser.parse_datetime(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_datetime, start, end) | Array or Matrix of random datetime generator.
:returns: 1d or 2d array of datetime.date |
def day_interval(year, month, day, milliseconds=False, return_string=False):
if milliseconds: # pragma: no cover
delta = timedelta(milliseconds=1)
else:
delta = timedelta(seconds=1)
start = datetime(year, month, day)
end = datetime(year, month, day) + timedelta(days=1) - delta
if not return_string:
return start, end
else:
return str(start), str(end) | Return a start datetime and end datetime of a day.
:param milliseconds: Minimum time resolution.
:param return_string: If you want string instead of datetime, set True
Usage Example::
>>> start, end = rolex.day_interval(2014, 6, 17)
>>> start
datetime(2014, 6, 17, 0, 0, 0)
>>> end
datetime(2014, 6, 17, 23, 59, 59) |
def month_interval(year, month, milliseconds=False, return_string=False):
if milliseconds: # pragma: no cover
delta = timedelta(milliseconds=1)
else:
delta = timedelta(seconds=1)
if month == 12:
start = datetime(year, month, 1)
end = datetime(year + 1, 1, 1) - delta
else:
start = datetime(year, month, 1)
end = datetime(year, month + 1, 1) - delta
if not return_string:
return start, end
else:
return str(start), str(end) | Return a start datetime and end datetime of a month.
:param milliseconds: Minimum time resolution.
:param return_string: If you want string instead of datetime, set True
Usage Example::
>>> start, end = rolex.month_interval(2000, 2)
>>> start
datetime(2000, 2, 1, 0, 0, 0)
>>> end
datetime(2000, 2, 29, 23, 59, 59) |
def year_interval(year, milliseconds=False, return_string=False):
if milliseconds: # pragma: no cover
delta = timedelta(milliseconds=1)
else:
delta = timedelta(seconds=1)
start = datetime(year, 1, 1)
end = datetime(year + 1, 1, 1) - delta
if not return_string:
return start, end
else:
return str(start), str(end) | Return a start datetime and end datetime of a year.
:param milliseconds: Minimum time resolution.
:param return_string: If you want string instead of datetime, set True
Usage Example::
>>> start, end = rolex.year_interval(2007)
>>> start
datetime(2007, 1, 1, 0, 0, 0)
>>> end
datetime(2007, 12, 31, 23, 59, 59) |
def renderfile(filename,
options=None,
templatePaths=None,
default='',
silent=False):
if not mako:
logger.debug('mako is not installed')
return default
if templatePaths is None:
templatePaths = []
# use the default mako templates
basepath = os.environ.get('MAKO_TEMPLATEPATH', '')
if basepath:
basetempls = basepath.split(os.path.pathsep)
else:
basetempls = []
templatePaths += basetempls
# include the root path
templatePaths.insert(0, os.path.dirname(filename))
    templatePaths = [x.replace('\\', '/') for x in templatePaths]
# update the default options
scope = dict(os.environ)
scope['projex_text'] = projex.text
scope['date'] = date
scope['datetime'] = datetime
scope.update(_macros)
scope.update(os.environ)
if options is not None:
scope.update(options)
old_env_path = os.environ.get('MAKO_TEMPLATEPATH', '')
os.environ['MAKO_TEMPLATEPATH'] = os.path.pathsep.join(templatePaths)
logger.debug('rendering mako file: %s', filename)
if templatePaths:
lookup = mako.lookup.TemplateLookup(directories=templatePaths)
templ = mako.template.Template(filename=filename, lookup=lookup)
else:
templ = mako.template.Template(filename=filename)
try:
output = templ.render(**scope)
except StandardError:
output = default
if not silent:
logger.exception('Error rendering mako text')
os.environ['MAKO_TEMPLATEPATH'] = old_env_path
return output | Renders a file to text using the mako template system.
To learn more about mako and its usage, see [[www.makotemplates.org]]
:return <str> formatted text |
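For reference, a minimal sketch of the mako calls renderfile builds on (the template path and body are illustrative):

import mako.template

templ = mako.template.Template(filename='greeting.mako')
print(templ.render(name='world'))  # e.g. a template body of: hello, ${name}!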
def collectfiles(path, filt=None):
if not os.path.isdir(path):
path = os.path.dirname(path)
output = []
for name in sorted(os.listdir(path)):
filepath = os.path.join(path, name)
if os.path.isfile(filepath):
if not filt or filt(name):
output.append((name, filepath))
return output | Collects some files based on the given filename.
:param path | <str>
filt | <method>
:return [(<str> name, <str> filepath), ..] |
def get_milestone(self, title):
if not title:
return GithubObject.NotSet
if not hasattr(self, '_milestones'):
self._milestones = {m.title: m for m in self.repo.get_milestones()}
milestone = self._milestones.get(title)
if not milestone:
milestone = self.repo.create_milestone(title=title)
return milestone | given the title as str, looks for an existing milestone or create a new one,
and return the object |
def get_assignee(self, login):
if not login:
return GithubObject.NotSet
if not hasattr(self, '_assignees'):
self._assignees = {c.login: c for c in self.repo.get_assignees()}
if login not in self._assignees:
# warning
print("{} doesn't belong to this repo. This issue won't be assigned.".format(login))
return self._assignees.get(login) | given the user login, looks for a user in assignee list of the repo
and return it if was found. |
def sender(self, issues):
for issue in issues:
state = self.get_state(issue.state)
if issue.number:
try:
gh_issue = self.repo.get_issue(issue.number)
original_state = gh_issue.state
if original_state == state:
action = 'Updated'
elif original_state == 'closed':
action = 'Reopened'
else:
action = 'Closed'
gh_issue.edit(title=issue.title,
body=issue.body,
labels=issue.labels,
milestone=self.get_milestone(issue.milestone),
assignee=self.get_assignee(issue.assignee),
state=self.get_state(issue.state)
)
print('{} #{}: {}'.format(action, gh_issue.number, gh_issue.title))
except GithubException:
print('Not found #{}: {} (ignored)'.format(issue.number, issue.title))
continue
else:
gh_issue = self.repo.create_issue(title=issue.title,
body=issue.body,
labels=issue.labels,
milestone=self.get_milestone(issue.milestone),
assignee=self.get_assignee(issue.assignee))
print('Created #{}: {}'.format(gh_issue.number, gh_issue.title)) | push a list of issues to github |
def define(self, key, value):
skey = nstr(key)
self._defaults[skey] = value
self[skey] = value | Defines the value for the inputted key by setting both its default and \
value to the inputted value.
:param key | <str>
value | <variant> |
def toXml(self, xparent):
for key, value in self.items():
elem = ElementTree.SubElement(xparent, 'entry')
        typ = type(value).__name__
elem.set('key', key)
elem.set('type', typ)
if typ in DataSet._xmlTypes:
DataSet._xmlTypes[typ][0](elem, value)
else:
elem.set('value', nstr(value)) | Saves the settings for this dataset to the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element> |
def fromXml(cls, xparent):
output = cls()
for xentry in xparent:
key = xentry.get('key')
if not key:
continue
typ = xentry.get('type', 'str')
if typ in DataSet._xmlTypes:
value = DataSet._xmlTypes[typ][1](xentry)
else:
value = xentry.get('value', '')
output.define(key, value)
    return output | Loads the settings for this dataset from the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element> |
def registerXmlType(typ, encoder, decoder):
DataSet._xmlTypes[nstr(typ)] = (encoder, decoder) | Registers a data type to encode/decode for xml settings.
:param typ | <object>
encoder | <method>
decoder | <method> |
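A hedged sketch of registering a custom type with the hook above (the encoder/decoder signatures are inferred from toXml/fromXml; note that _xmlTypes is keyed by the type name string written to the xml 'type' attribute):

from datetime import datetime

def _encode_datetime(elem, value):
    # Store the value on the entry element, mirroring the default branch of toXml.
    elem.set('value', value.isoformat())

def _decode_datetime(xentry):
    return datetime.fromisoformat(xentry.get('value'))

DataSet.registerXmlType('datetime', _encode_datetime, _decode_datetime)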
def wrap_node(self, node, options):
    '''
    we have the option to construct nodes here, so we can use different
    queues for nodes without having to have different queue objects.
    '''
    job_kwargs = {
        'queue': options.get('queue', 'default'),
        'connection': options.get('connection', self.redis_connection),
        'timeout': options.get('timeout', None),
        'result_ttl': options.get('result_ttl', 500),
    }
return job(**job_kwargs)(node) | we have the option to construct nodes here, so we can use different
queues for nodes without having to have different queue objects. |
def _writeMzmlIndexList(xmlWriter, spectrumIndexList, chromatogramIndexList):
counts = 0
if spectrumIndexList:
counts += 1
if chromatogramIndexList:
counts += 1
if counts == 0:
return None
#Create indexList node
xmlIndexList = xmlWriter.element('indexList', {'count': str(counts)})
xmlIndexList.__enter__()
xmlWriter.write('\n')
_writeIndexListElement(xmlWriter, 'spectrum', spectrumIndexList)
_writeIndexListElement(xmlWriter, 'chromatogram', chromatogramIndexList)
#Close indexList node
xmlIndexList.__exit__(None, None, None)
xmlWriter.write('\n') | #TODO: docstring
:param xmlWriter: #TODO: docstring
:param spectrumIndexList: #TODO: docstring
:param chromatogramIndexList: #TODO: docstring |
def _writeIndexListElement(xmlWriter, elementName, indexList):
if indexList:
xmlIndex = xmlWriter.element('index', {'name': elementName})
xmlIndex.__enter__()
xmlWriter.write('\n')
for offset, indexId in indexList:
offsetElement = ETREE.Element('offset', {'idRef': indexId})
offsetElement.text = str(offset)
xmlWriter.write(offsetElement, pretty_print=True)
xmlIndex.__exit__(None, None, None)
xmlWriter.write('\n') | #TODO: docstring
:param xmlWriter: #TODO: docstring
:param elementName: #TODO: docstring
:param indexList: #TODO: docstring |
def _writeMzmlChecksum(xmlWriter, outputFile):
sha = hashlib.sha1(outputFile.getvalue())
sha.update('<fileChecksum>')
xmlChecksumElement = ETREE.Element('fileChecksum')
xmlChecksumElement.text = sha.hexdigest()
xmlWriter.write(xmlChecksumElement, pretty_print=True) | #TODO: docstring
:param xmlWriter: #TODO: docstring
:param outputFile: #TODO: docstring |
def _writeIndexListOffset(xmlWriter, offset):
xmlIndexListOffset = ETREE.Element('indexListOffset')
xmlIndexListOffset.text = str(offset)
xmlWriter.write(xmlIndexListOffset, pretty_print=True) | #TODO: docstring
:param xmlWriter: #TODO: docstring
:param offset: #TODO: docstring |
def xmlGenScanList(scanList, scanListParams):
numEntries = len(scanList)
xmlScanList = ETREE.Element('scanList', {'count': str(numEntries)})
maspy.xml.xmlAddParams(xmlScanList, scanListParams)
for scan in scanList:
#Note: no attributes supported
xmlScan = ETREE.Element('scan', {})
maspy.xml.xmlAddParams(xmlScan, scan['params'])
#Generate the scanWindowList entry
numScanWindows = len(scan['scanWindowList'])
if numScanWindows > 0:
xmlScanWindowList = ETREE.Element('scanWindowList',
{'count': str(numScanWindows)}
)
for scanWindow in scan['scanWindowList']:
xmlScanWindow = ETREE.Element('scanWindow')
maspy.xml.xmlAddParams(xmlScanWindow, scanWindow)
xmlScanWindowList.append(xmlScanWindow)
xmlScan.append(xmlScanWindowList)
xmlScanList.append(xmlScan)
return xmlScanList | #TODO: docstring
:params scanList: #TODO: docstring
:params scanListParams: #TODO: docstring
:returns: #TODO: docstring |
def xmlGenPrecursorList(precursorList):
numEntries = len(precursorList)
xmlPrecursorList = ETREE.Element('precursorList',
{'count': str(numEntries)}
)
for precursor in precursorList:
#Note: no attributes for external referencing supported
precursorAttrib = {}
if precursor['spectrumRef'] is not None:
precursorAttrib.update({'spectrumRef': precursor['spectrumRef']})
xmlPrecursor = ETREE.Element('precursor', precursorAttrib)
#Add isolationWindow element
if precursor['isolationWindow'] is not None:
xmlIsolationWindow = ETREE.Element('isolationWindow')
maspy.xml.xmlAddParams(xmlIsolationWindow,
precursor['isolationWindow']
)
xmlPrecursor.append(xmlIsolationWindow)
#Add selectedIonList element
numSelectedIons = len(precursor['selectedIonList'])
if numSelectedIons > 0:
xmlSelectedIonList = ETREE.Element('selectedIonList',
{'count': str(numSelectedIons)}
)
for selectedIon in precursor['selectedIonList']:
xmlSelectedIon = ETREE.Element('selectedIon')
maspy.xml.xmlAddParams(xmlSelectedIon, selectedIon)
xmlSelectedIonList.append(xmlSelectedIon)
xmlPrecursor.append(xmlSelectedIonList)
#Add activation element
xmlActivation = ETREE.Element('activation')
maspy.xml.xmlAddParams(xmlActivation, precursor['activation'])
xmlPrecursor.append(xmlActivation)
xmlPrecursorList.append(xmlPrecursor)
return xmlPrecursorList | #TODO: docstring
:params precursorList: #TODO: docstring
:returns: #TODO: docstring |
def xmlSpectrumFromSmi(index, smi, sai=None, compression='zlib'):
if sai is not None:
arrayLength = [array.size for array in viewvalues(sai.arrays)]
if len(set(arrayLength)) != 1:
            raise Exception('Unequal size for different arrays in sai.arrays')
else:
arrayLength = arrayLength[0]
else:
arrayLength = 0
spectrumAttrib = {'index': str(index), 'id': smi.attributes['id'],
'defaultArrayLength': str(arrayLength)}
xmlSpectrum = ETREE.Element('spectrum', **spectrumAttrib)
maspy.xml.xmlAddParams(xmlSpectrum, smi.params)
#Add the scanList
if len(smi.scanList) > 0:
xmlSpectrum.append(xmlGenScanList(smi.scanList, smi.scanListParams))
if len(smi.precursorList) > 0:
xmlSpectrum.append(xmlGenPrecursorList(smi.precursorList))
if len(smi.productList) > 0:
xmlSpectrum.append(xmlGenProductList(smi.productList))
if sai is not None:
xmlSpectrum.append(xmlGenBinaryDataArrayList(sai.arrayInfo,
sai.arrays,
compression=compression
))
return xmlSpectrum | #TODO: docstring
:param index: The zero-based, consecutive index of the spectrum in the
SpectrumList. (mzML specification)
:param smi: a SpectrumMetadataItem instance
:param sai: a SpectrumArrayItem instance, if none is specified no
binaryDataArrayList is written
:param compression: #TODO: docstring
:returns: #TODO: docstring |
def xmlChromatogramFromCi(index, ci, compression='zlib'):
arrayLength = [array.size for array in viewvalues(ci.arrays)]
    if len(set(arrayLength)) != 1:
        raise Exception('Unequal size for different arrays in ci.arrays')
else:
arrayLength = arrayLength[0]
chromatogramAttrib = {'index': str(index), 'id': ci.id,
'defaultArrayLength': str(arrayLength)}
if 'dataProcessingRef' in ci.attrib:
        chromatogramAttrib.update({'dataProcessingRef': ci.attrib['dataProcessingRef']})
xmlChromatogram = ETREE.Element('chromatogram', **chromatogramAttrib)
maspy.xml.xmlAddParams(xmlChromatogram, ci.params)
#TODO: add appropriate functions for precursor and product
if ci.product is not None:
raise NotImplementedError()
if ci.precursor is not None:
raise NotImplementedError()
#Sort the array keys, that 'rt' is always the first, necessary for example
# for the software "SeeMS" to properly display chromatograms.
arrayTypes = set(ci.arrayInfo)
if 'rt' in arrayTypes:
arrayTypes.remove('rt')
arrayTypes = ['rt'] + list(arrayTypes)
else:
arrayTypes = list(arrayTypes)
xmlChromatogram.append(xmlGenBinaryDataArrayList(ci.arrayInfo,
ci.arrays,
compression=compression,
arrayTypes=arrayTypes
)
)
return xmlChromatogram | #TODO: docstring
:param index: #TODO: docstring
:param ci: #TODO: docstring
:param compression: #TODO: docstring
:returns: #TODO: docstring |
def uninstall_pgpm_from_db(self):
drop_schema_cascade_script = 'DROP SCHEMA {schema_name} CASCADE;'
if self._conn.closed:
self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection)
cur = self._conn.cursor()
# get current user
cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.current_user_sql)
current_user = cur.fetchone()[0]
# check if current user is a super user
cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.is_superuser_sql)
is_cur_superuser = cur.fetchone()[0]
if not is_cur_superuser:
self._logger.debug('User {0} is not a superuser. Only superuser can remove pgpm'
.format(current_user))
sys.exit(1)
self._logger.debug('Removing pgpm from DB by dropping schema {0}'.format(self._pgpm_schema_name))
cur.execute(drop_schema_cascade_script.format(schema_name=self._pgpm_schema_name))
# Commit transaction
self._conn.commit()
self._conn.close()
    return 0 | Removes pgpm from db and all related metadata (_pgpm schema). Installed packages are left as they are
:return: 0 if successful and error otherwise |
def save(self, *args, **kwargs):
if self.pk is None:
if hasattr(self, 'product'):
if not self.description:
self.description = self.product
self.price_recommended = self.product.price_base
elif hasattr(self, 'line_order'):
if not self.description:
self.description = self.line_order.product
self.price_recommended = self.line_order.price_base
if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
self.tax = self.type_tax.tax
if hasattr(self, 'product'):
self.tax_label = self.product.product.tax.name
if self.product.code:
self.code = self.product.code
else:
self.code = self.product.product.code
self.update_total(force_save=False)
if 'force_save' in kwargs:
kwargs.pop('force_save')
    return super(GenLineProduct, self).save(*args, **kwargs) | when saving a line associated with a locked document (lock==True), duplicate the document as a new version |
def create_albaran_automatic(pk, list_lines):
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
        # only those order lines that have not already been put on a delivery note
if line_bd.count() != 0:
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
    GenLineProduct.create_albaran_from_order(pk, list_lines) | automatically create the delivery note (albaran) |
def create_invoice_from_albaran(pk, list_lines):
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
    return context | pk and list_lines belong to delivery notes (albaranes); we need the info from the order lines |
def create_invoice_from_ticket(pk, list_lines):
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
    return context | pk and list_lines belong to tickets; we need the info from the order lines |
def set_options(self, options):
with transaction.atomic():
for option in options:
opt = self.line_basket_option_sales.filter(
product_option=option['product_option']
).first()
if opt: # edit
change = False
if opt.quantity != option['quantity']:
opt.quantity = option['quantity']
change = True
if opt.product_final != option['product_final']:
opt.product_final = option['product_final']
change = True
if change:
opt.save()
else: # new
opt = SalesLineBasketOption()
# raise Exception(self.pk, self.__dict__, self)
# raise Exception(self.pk)
opt.line_budget = SalesLineBasket.objects.get(pk=self.pk)
opt.product_option = option['product_option']
opt.product_final = option['product_final']
opt.quantity = option['quantity']
opt.save() | options = [{
'product_option': instance of ProductFinalOption,
'product_final': instance of ProductFinal,
'quantity': Float
}, ] |
def findmodules(path, recurse=False):
output = set()
roots = set()
for root, folders, files in os.walk(path):
# add packages
for folder in folders:
pkgpath = os.path.join(root, folder, '__init__.py')
if os.path.exists(pkgpath):
output.add(packageFromPath(pkgpath))
# add modules
rootpth = packageRootPath(root)
rootpkg = packageFromPath(root)
roots.add(rootpth)
for file_ in files:
name, ext = os.path.splitext(file_)
if ext not in ('.py', '.pyo', '.pyc'):
continue
if name in ('__init__', '__plugins__'):
continue
if rootpkg:
output.add(rootpkg + '.' + name)
else:
output.add(name)
if not recurse:
break
return list(output), list(roots) | Looks up the modules for the given path and returns a list of the
packages. If the recurse flag is set to True, then it will look
through the package recursively.
:param path | <str>
recurse | <bool>
:return ([<str>, ..] modules, [<str>, ..] paths) |
def importfile(filename):
pkg = packageFromPath(filename, includeModule=True)
root = packageRootPath(filename)
if root not in sys.path:
sys.path.insert(0, root)
__import__(pkg)
return sys.modules[pkg] | Imports a module specifically from a file.
:param filename | <str>
:return <module> || None |
def importobject(module_name, object_name):
if module_name not in sys.modules:
try:
__import__(module_name)
except ImportError:
logger.debug(traceback.print_exc())
logger.error('Could not import module: %s', module_name)
return None
module = sys.modules.get(module_name)
if not module:
logger.warning('No module %s found.' % module_name)
return None
if not hasattr(module, object_name):
logger.warning('No object %s in %s.' % (object_name, module_name))
return None
return getattr(module, object_name) | Imports the object with the given name from the inputted module.
:param module_name | <str>
object_name | <str>
:usage |>>> import projex
|>>> modname = 'projex.envmanager'
|>>> attr = 'EnvManager'
|>>> EnvManager = projex.importobject(modname, attr)
:return <object> || None |
def packageRootPath(path):
path = nstr(path)
if os.path.isfile(path):
path = os.path.dirname(path)
parts = os.path.normpath(path).split(os.path.sep)
package_parts = []
for i in range(len(parts), 0, -1):
filename = os.path.sep.join(parts[:i] + ['__init__.py'])
if not os.path.isfile(filename):
break
package_parts.insert(0, parts[i - 1])
if not package_parts:
return path
return os.path.abspath(os.path.sep.join(parts[:-len(package_parts)])) | Returns the root file path that defines a Python package from the inputted
path.
:param path | <str>
:return <str> |
def packageFromPath(path, includeModule=False):
path = nstr(path)
module = ''
if os.path.isfile(path):
path, fname = os.path.split(path)
if fname.endswith('.py') and fname != '__init__.py':
module = fname.split('.')[0]
parts = os.path.normpath(path).split(os.path.sep)
package_parts = []
for i in range(len(parts), 0, -1):
filename = os.path.sep.join(parts[:i] + ['__init__.py'])
if not os.path.isfile(filename):
break
package_parts.insert(0, parts[i - 1])
if includeModule and module:
package_parts.append(module)
return '.'.join(package_parts) | Determines the python package path based on the inputted path.
:param path | <str>
:return <str> |
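A worked example for the two path helpers above, assuming /proj/pkg/__init__.py and /proj/pkg/sub/__init__.py exist but /proj itself has no __init__.py:

packageRootPath('/proj/pkg/sub/mod.py')                      # -> '/proj'
packageFromPath('/proj/pkg/sub/mod.py')                      # -> 'pkg.sub'
packageFromPath('/proj/pkg/sub/mod.py', includeModule=True)  # -> 'pkg.sub.mod'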
def website(app=None, mode='home', subcontext='UserGuide'):
base_url = WEBSITES.get(mode, '')
if app and base_url:
opts = {'app': app, 'base_url': base_url}
base_url = SUBCONTEXT_MAP.get((mode, subcontext), base_url)
base_url %= opts
return base_url | Returns the website location for projex software.
:param app | <str> || None
mode | <str> (home, docs, blog, dev)
:return <str> |
def _check_values(in_values):
out_values = []
for value in in_values:
# if isinstance(value, (dict, list)):
# out_values.append(json.dumps(value))
# else:
out_values.append(value)
return tuple(out_values) | Check if values need to be converted before they get mogrify'd |
def clone(srcpath, destpath, vcs=None):
vcs = vcs or probe(srcpath)
cls = _get_repo_class(vcs)
return cls.clone(srcpath, destpath) | Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`. |
def probe(path):
import os
from .common import UnknownVCSType
if os.path.isdir(os.path.join(path, '.git')):
return 'git'
elif os.path.isdir(os.path.join(path, '.hg')):
return 'hg'
elif (
os.path.isfile(os.path.join(path, 'config')) and
os.path.isdir(os.path.join(path, 'objects')) and
os.path.isdir(os.path.join(path, 'refs')) and
os.path.isdir(os.path.join(path, 'branches'))
):
return 'git'
elif (
os.path.isfile(os.path.join(path, 'format')) and
os.path.isdir(os.path.join(path, 'conf')) and
os.path.isdir(os.path.join(path, 'db')) and
os.path.isdir(os.path.join(path, 'locks'))
):
return 'svn'
else:
raise UnknownVCSType(path) | Probe a repository for its type.
:param str path: The path of the repository
:raises UnknownVCSType: if the repository type couldn't be inferred
:returns str: either ``git``, ``hg``, or ``svn``
This function employs some heuristics to guess the type of the repository. |
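Illustrative usage tying the two helpers together (the paths are hypothetical):

vcs = probe('/path/to/existing/repo')   # 'git', 'hg', or 'svn'
repo = clone('/path/to/existing/repo', '/tmp/copy', vcs=vcs)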