def remove_lib(lib_name): targ_dlib = libraries_dir() / lib_name log.debug('remove %s', targ_dlib) targ_dlib.rmtree()
Remove a library. :param lib_name: library name (e.g. 'PS2Keyboard') :rtype: None
def _read_holidays(self, filename): cal = Calendar.from_ical(open(filename, 'rb').read()) for component in cal.walk('VEVENT'): start = component.decoded('DTSTART') try: end = component.decoded('DTEND') except KeyError: # RFC allows DTEND to be missing if isinstance(start, datetime): # For DATETIME instances, the event ends immediately. end = start elif isinstance(start, date): # For DATE instances, the event ends tomorrow end = start + timedelta(days=1) else: raise KeyError('DTEND is missing and DTSTART is not of DATE or DATETIME type') if isinstance(start, date) and not isinstance(start, datetime): assert (isinstance(end, date) and not isinstance(end, datetime)), \ 'DTSTART is of DATE type but DTEND is not of DATE type (got %r instead)' % type(end) # All-day event, set times to midnight local time start = datetime.combine(start, time.min) end = datetime.combine(end, time.min) # check for TZ data if start.tzinfo is None or end.tzinfo is None: # One of them is missing tzinfo, replace both with this office's # local time. Assume standard time if ambiguous. start = self.tz.localize(start, is_dst=False) end = self.tz.localize(end, is_dst=False) yield (start, end)
Read holidays from an iCalendar-format file.
def in_hours(self, when): # convert to local timezone when = when.astimezone(self.tz) # is it a work day? if when.weekday() not in self.hours: # not a work day return False # work out if it is one of the ranges for start, end in self.hours[when.weekday()]: if start <= when.time() <= end: # it's in that range # is it a public holiday? check if it is any range for hstart, hend in self.holidays: if when >= hstart and when <= hend: # it's inside a holiday area. return False # not in a holiday zone, which means it's business time. return True # not in any range of hours, and was on a work day return False
Find if the given :class:`~datetime.datetime` is in business hours for this office. :param datetime.datetime when: The time to check :returns: True if the given time is in business hours for the office, False otherwise. :rtype: bool
def in_hours(self, office=None, when=None): if when is None: when = datetime.now(tz=utc) if office is None: for office in self.offices.itervalues(): if office.in_hours(when): return True return False else: # check specific office return self.offices[office].in_hours(when)
Finds if it is business hours in the given office. :param office: Office ID to look up, or None to check if any office is in business hours. :type office: str or None :param datetime.datetime when: When to check the office is open, or None for now. :returns: True if it is business hours, False otherwise. :rtype: bool :raises KeyError: If the office is unknown.
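A minimal usage sketch, assuming a configured hours object with an office registered under the ID 'sydney' (both names are hypothetical, not from the source):

from datetime import datetime
from pytz import utc

hours.in_hours()                                             # any office open right now?
hours.in_hours(office='sydney')                              # a specific office, right now
hours.in_hours(when=datetime(2024, 1, 2, 3, 0, tzinfo=utc))  # any office open at that instant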
def setup_logging(namespace): loglevel = { 0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG, }.get(namespace.verbosity, logging.DEBUG) if namespace.verbosity > 1: logformat = '%(levelname)s csvpandas %(lineno)s %(message)s' else: logformat = 'csvpandas %(message)s' logging.basicConfig(stream=namespace.log, format=logformat, level=loglevel)
Set up global logging.
def parse_subcommands(parser, subcommands, argv): subparsers = parser.add_subparsers(dest='subparser_name') # add help sub-command parser_help = subparsers.add_parser( 'help', help='Detailed help for actions using `help <action>`') parser_help.add_argument('action', nargs=1) # add all other subcommands modules = [ name for _, name, _ in pkgutil.iter_modules(subcommands.__path__)] commands = [m for m in modules if m in argv] actions = {} # `commands` will contain the module corresponding to a single # subcommand if provided; otherwise, generate top-level help # message from all submodules in `modules`. for name in commands or modules: # set up subcommand help text. The first line of the docstring # in the module is displayed as the help text in the # script-level help message (`script -h`). The entire # docstring is displayed in the help message for the # individual subcommand (`script action -h`). # If no individual subcommand is specified (run_action[False]), # a full list of docstrings is displayed try: imp = '{}.{}'.format(subcommands.__name__, name) mod = importlib.import_module(imp) except Exception as e: log.error(e) continue subparser = subparsers.add_parser( name, help=mod.__doc__.lstrip().split('\n', 1)[0], description=mod.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) mod.build_parser(subparser) # see global subcommands/__init__.py subcommands.build_parser(subparser) actions[name] = mod.action return parser, actions
Set up all sub-commands.
def opener(mode='r'): def open_file(f): if f is sys.stdout or f is sys.stdin: return f elif f == '-': return sys.stdin if 'r' in mode else sys.stdout elif f.endswith('.bz2'): return bz2.BZ2File(f, mode) elif f.endswith('.gz'): return gzip.open(f, mode) else: return open(f, mode) return open_file
Factory for creating file objects that transparently handle '-' (stdin/stdout) and .gz/.bz2 paths. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function.
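The factory composes naturally with argparse file arguments; a sketch of assumed usage (the argument name and file path are illustrative):

import argparse, sys

parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', default=sys.stdin, type=opener('r'))
args = parser.parse_args(['data.csv.gz'])  # args.infile is a gzip-wrapped file object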
def get(self, id, no_summary=False): resp = self.client.accounts.get(id) if no_summary: return self.display(resp) results = [] # Get a list of all volumes for this tenant id client = LunrClient(self.get_admin(), debug=self.debug) volumes = client.volumes.list(account_id=resp['id']) #volumes = self.client.volumes.list(resp['id']) for volume in volumes: if volume['status'] == 'DELETED': continue results.append(volume) self.display(resp, ['name', 'status', 'last_modified', 'created_at']) if results: return self.display(response(results, 200), ['id', 'status', 'size']) else: print("-- This account has no active volumes --") print("\nThis is a summary, use --no-summary " "to see the entire response")
List details for a specific tenant id
def create(self, id): resp = self.client.accounts.create(id=id) self.display(resp)
Create a new tenant id
def delete(self, id): resp = self.client.accounts.delete(id) self.display(resp)
Delete a tenant id
def main(): parser = argparse.ArgumentParser() group_tcp = parser.add_argument_group('TCP') group_tcp.add_argument('--tcp', dest='mode', action='store_const', const=PROP_MODE_TCP, help="Set tcp mode") group_tcp.add_argument('--host', dest='hostname', help="Specify hostname", default='') group_tcp.add_argument('--port', dest='port', help="Specify port", default=23, type=int) group_serial = parser.add_argument_group('Serial') group_serial.add_argument('--serial', dest='mode', action='store_const', const=PROP_MODE_SERIAL, help="Set serial mode") group_serial.add_argument('--interface', dest='interface', help="Specify interface", default='') group_file = parser.add_argument_group('File') group_file.add_argument('--file', dest='mode', action='store_const', const=PROP_MODE_FILE, help="Set file mode") group_file.add_argument('--name', dest='file', help="Specify file name", default='') args = parser.parse_args() kwb = KWBEasyfire(args.mode, args.hostname, args.port, args.interface, 0, args.file) kwb.run_thread() time.sleep(5) kwb.stop_thread() print(kwb)
Main method for debug purposes.
def _open_connection(self): if (self._mode == PROP_MODE_SERIAL): self._serial = serial.Serial(self._serial_device, self._serial_speed) elif (self._mode == PROP_MODE_TCP): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.connect((self._ip, self._port)) elif (self._mode == PROP_MODE_FILE): self._file = open(self._file_path, "r")
Open a connection to the easyfire unit.
def _close_connection(self): if (self._mode == PROP_MODE_SERIAL): self._serial.close() elif (self._mode == PROP_MODE_TCP): self._socket.close() elif (self._mode == PROP_MODE_FILE): self._file.close()
Close the connection to the easyfire unit.
def _add_to_checksum(self, checksum, value): checksum = self._byte_rot_left(checksum, 1) checksum = checksum + value if (checksum > 255): checksum = checksum - 255 self._debug(PROP_LOGLEVEL_TRACE, "C: " + str(checksum) + " V: " + str(value)) return checksum
Add a byte to the checksum.
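The `_byte_rot_left` helper is not shown in this excerpt; assuming it is the conventional 8-bit rotate-left, a sketch would be:

def _byte_rot_left(self, byte, count):
    # Rotate an 8-bit value left by `count` bits, wrapping the high bits around.
    return ((byte << count) | (byte >> (8 - count))) & 0xFF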
def _read_byte(self): to_return = "" if (self._mode == PROP_MODE_SERIAL): to_return = self._serial.read(1) elif (self._mode == PROP_MODE_TCP): to_return = self._socket.recv(1) elif (self._mode == PROP_MODE_FILE): to_return = struct.pack("B", int(self._file.readline())) _LOGGER.debug("READ: " + str(ord(to_return))) self._logdata.append(ord(to_return)) if (len(self._logdata) > self._logdatalen): self._logdata = self._logdata[len(self._logdata) - self._logdatalen:] self._debug(PROP_LOGLEVEL_TRACE, "READ: " + str(ord(to_return))) return to_return
Read a byte from input.
def _sense_packet_to_data(packet): data = bytearray(0) last = 0 i = 1 while (i < len(packet)): if not (last == 2 and packet[i] == 0): data.append(packet[i]) last = packet[i] i += 1 return data
Remove the escape pad bytes from a sense packet (\2\0 -> \2).
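A worked example of the unescaping, assuming the function is callable as shown (note the loop starts at index 1, so the leading byte of the packet is dropped):

packet = bytearray([0x65, 0x02, 0x00, 0x31])  # 0x65 is skipped; \2\0 collapses to \2
assert _sense_packet_to_data(packet) == bytearray([0x02, 0x31])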
def _decode_temp(byte_1, byte_2): temp = (byte_1 << 8) + byte_2 if (temp > 32767): temp = temp - 65536 temp = temp / 10 return temp
Decode a signed short temperature as two bytes to a single number.
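A worked example of the two's-complement handling (assuming Python 3 true division, so tenths of a degree come out as floats):

# (0xFF << 8) + 0x38 = 65336 > 32767, so 65336 - 65536 = -200 tenths of a degree.
assert _decode_temp(0xFF, 0x38) == -20.0
assert _decode_temp(0x00, 0xFA) == 25.0   # 250 tenths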
def _decode_sense_packet(self, version, packet): data = self._sense_packet_to_data(packet) offset = 4 i = 0 datalen = len(data) - offset - 6 temp_count = int(datalen / 2) temp = [] for i in range(temp_count): temp_index = i * 2 + offset temp.append(self._decode_temp(data[temp_index], data[temp_index + 1])) self._debug(PROP_LOGLEVEL_DEBUG, "T: " + str(temp)) for sensor in self._sense_sensor: if (sensor.sensor_type == PROP_SENSOR_TEMPERATURE): sensor.value = temp[sensor.index] elif (sensor.sensor_type == PROP_SENSOR_RAW): sensor.value = packet self._debug(PROP_LOGLEVEL_DEBUG, str(self))
Decode a sense packet into the list of sensors.
def _decode_ctrl_packet(self, version, packet): for i in range(5): input_bit = packet[i] self._debug(PROP_LOGLEVEL_DEBUG, "Byte " + str(i) + ": " + str((input_bit >> 7) & 1) + str((input_bit >> 6) & 1) + str((input_bit >> 5) & 1) + str((input_bit >> 4) & 1) + str((input_bit >> 3) & 1) + str((input_bit >> 2) & 1) + str((input_bit >> 1) & 1) + str(input_bit & 1)) for sensor in self._ctrl_sensor: if (sensor.sensor_type == PROP_SENSOR_FLAG): sensor.value = (packet[sensor.index // 8] >> (sensor.index % 8)) & 1 elif (sensor.sensor_type == PROP_SENSOR_RAW): sensor.value = packet
Decode a control packet into the list of sensors.
def run(self): while (self._run_thread): (mode, version, packet) = self._read_packet() if (mode == PROP_PACKET_SENSE): self._decode_sense_packet(version, packet) elif (mode == PROP_PACKET_CTRL): self._decode_ctrl_packet(version, packet)
Main thread that reads from input and populates the sensors.
def run_thread(self): self._run_thread = True self._thread.setDaemon(True) self._thread.start()
Run the main thread.
def unused(self, _dict): for key, value in list(_dict.items()): # copy items so we can delete while iterating if value is None: del _dict[key] return _dict
Remove empty parameters from the dict
def required(self, method, _dict, require): for key in require: if key not in _dict: raise LunrError("'%s' is required argument for method '%s'" % (key, method))
Ensure the required items are in the dictionary
def allowed(self, method, _dict, allow): for key in _dict.keys(): if key not in allow: raise LunrError("'%s' is not an argument for method '%s'" % (key, method))
Only these items are allowed in the dictionary
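A sketch of how the three validators above chain together (the client object, method name, and keys are illustrative, not from the source):

params = {'name': 'vol1', 'size': 10, 'comment': None}
params = client.unused(params)                              # drops 'comment'
client.required('volume_create', params, ['name', 'size'])  # passes
client.allowed('volume_create', params, ['name', 'size'])   # passes; any stray key raises LunrError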
def parse_event_name(name): try: app, event = name.split('.') return '{}.{}'.format(app, EVENTS_MODULE_NAME), event except ValueError: raise InvalidEventNameError( (u'The name "{}" is invalid. ' u'Make sure you are using the "app.KlassName" format' ).format(name))
Returns the Python module path and event class name for a given event name
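For example, assuming `EVENTS_MODULE_NAME` is set to 'events' (the constant's value is not shown in this excerpt):

parse_event_name('billing.InvoicePaid')   # -> ('billing.events', 'InvoicePaid')
parse_event_name('no-dot-here')           # raises InvalidEventNameError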
def find_event(name): try: module, klass = parse_event_name(name) return getattr(import_module(module), klass) except (ImportError, AttributeError): raise EventNotFoundError( ('Event "{}" not found. ' 'Make sure you have a class called "{}" inside the "{}" ' 'module.'.format(name, klass, module)))
Actually import the event represented by name Raises the `EventNotFoundError` if it's not possible to find the event class referred to by `name`.
def cleanup_handlers(event=None): if event: if event in HANDLER_REGISTRY: del HANDLER_REGISTRY[event] if event in EXTERNAL_HANDLER_REGISTRY: del EXTERNAL_HANDLER_REGISTRY[event] else: HANDLER_REGISTRY.clear() EXTERNAL_HANDLER_REGISTRY.clear()
Remove handlers of a given `event`. If no event is informed, wipe out all events registered. Be careful!! This function is intended to help when writing tests and for debugging purposes. If you call it, all handlers associated with an event (or with all of them) will be disassociated. Which means that you'll have to reload all modules that declare handlers. I'm sure you don't want that.
def find_handlers(event_name, registry=HANDLER_REGISTRY): handlers = [] # event_name can be a BaseEvent or the string representation if isinstance(event_name, basestring): matched_events = [event for event in registry.keys() if fnmatch.fnmatchcase(event_name, event)] for matched_event in matched_events: handlers.extend(registry.get(matched_event)) else: handlers = registry.get(find_event(event_name), []) return handlers
Small helper to find all handlers associated to a given event If the event can't be found, an empty list will be returned, since this is an internal function and all validation against the event name and its existence was already performed.
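Because matching goes through `fnmatch.fnmatchcase`, string registrations may use shell-style wildcards; a small illustration with an assumed registry shape:

registry = {'billing.*': [lambda data: data]}
find_handlers('billing.InvoicePaid', registry)   # -> the one handler above
find_handlers('auth.Login', registry)            # -> []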
def process(event_name, data): deserialized = loads(data) event_cls = find_event(event_name) event = event_cls(event_name, deserialized) try: event.clean() except ValidationError as exc: if os.environ.get('EVENTLIB_RAISE_ERRORS'): raise else: logger.warning( "The event system just got an exception while cleaning " "data for the event '{}'\ndata: {}\nexc: {}".format( event_name, data, str(exc))) return for handler in find_handlers(event_name): try: handler(deserialized) except Exception as exc: logger.warning( (u'One of the handlers for the event "{}" has failed with the ' u'following exception: {}').format(event_name, str(exc))) if getsetting('DEBUG'): raise exc event._broadcast()
Iterates over the event handler registry and executes each found handler. It takes the event name and its `data`, passing the return of `ejson.loads(data)` to the found handlers.
def process_external(event_name, data): for handler in find_external_handlers(event_name): try: handler(data) except Exception as exc: logger.warning( (u'One of the handlers for the event "{}" has failed with the ' u'following exception: {}').format(event_name, str(exc))) if getsetting('DEBUG'): raise exc
Iterates over the external event handler registry and executes each found handler. It takes the event name and its `data`, passing `data` unchanged to the found handlers.
def get_default_values(data): request = data.get('request') result = {} result['__datetime__'] = datetime.now() result['__ip_address__'] = request and get_ip(request) or '0.0.0.0' return result
Return all default values that an event should have
def filter_data_values(data): banned = ('request',) return {key: val for key, val in data.items() if not key in banned}
Remove special values that the log function can take There are some special values, like "request", that the `log()` function can take, but they're not meant to be passed to the celery task nor to the event handlers. This function filters those keys out and returns another dict without them.
def import_event_modules(): for installed_app in getsetting('INSTALLED_APPS'): module_name = u'{}.{}'.format(installed_app, EVENTS_MODULE_NAME) try: import_module(module_name) except ImportError: pass
Import all events declared for all currently installed apps This function walks through the list of installed apps and tries to import a module named `EVENTS_MODULE_NAME`.
def handle_expired_accounts(): ACTIVATED = RegistrationProfile.ACTIVATED expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) to_delete = [] print "Processing %s registration profiles..." % str(RegistrationProfile.objects.all().count()) for profile in RegistrationProfile.objects.all(): # if the registration profile has expired, deactivate the user. print "Processing %s" % profile.user # If profile has been activated already, set it to be removed # and move on to next registration profile if profile.activation_key == ACTIVATED: print "Found Active" to_delete.append(profile.pk) continue # If the user has not activated their account and the activation # days have passed, deactivate the user and send an email to the user. if profile.user.is_active and profile.user.date_joined + expiration_date <= datetime.datetime.now(): print "Found Expired" user = profile.user user.is_active = False # Send an email notifying the user of their account becoming inactive. site = Site.objects.get_current() ctx_dict = { 'site': site, 'activation_key': profile.activation_key} subject = render_to_string( 'registration/email/emails/account_expired_subject.txt', ctx_dict) subject = ''.join(subject.splitlines()) message = render_to_string( 'registration/email/emails/account_expired.txt', ctx_dict) user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) # Only save the user instance after the email is sent. user.save() # Delete the registration profiles that were set to be deleted, aka # the user has already activated their account. print "Deleting %s registration profiles." % str(len(to_delete)) RegistrationProfile.objects.filter(pk__in=to_delete).delete()
Check for expired accounts.
def activate(self, request, activation_key): if SHA1_RE.search(activation_key): try: profile = RegistrationProfile.objects.get(activation_key=activation_key) except RegistrationProfile.DoesNotExist: return False user = profile.user user.is_active = True user.save() profile.activation_key = RegistrationProfile.ACTIVATED profile.save() return user return False
Override default activation process. This will activate the user even if it is past its expiration date.
def send_activation_email(self, user, profile, password, site): ctx_dict = { 'password': password, 'site': site, 'activation_key': profile.activation_key, 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS} subject = render_to_string( 'registration/email/emails/password_subject.txt', ctx_dict) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) message = render_to_string('registration/email/emails/password.txt', ctx_dict) try: user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) except: pass
Custom send-email method that supplies the activation link and the newly generated password.
def post_registration_redirect(self, request, user): next_url = "/registration/register/complete/" if "next" in request.GET or "next" in request.POST: next_url = request.GET.get("next", None) or request.POST.get("next", None) or "/" return (next_url, (), {})
After registration, redirect to the home page or supplied "next" query string or hidden field value.
def next(self): if self.start + self.size > self.total_size: result = None else: result = Batch(self.start + self.size, self.size, self.total_size) return result
Returns the next batch for the batched sequence or `None`, if this batch is already the last batch. :rtype: :class:`Batch` instance or `None`.
def previous(self): if self.start - self.size < 0: result = None else: result = Batch(self.start - self.size, self.size, self.total_size) return result
Returns the previous batch for the batched sequence or `None`, if this batch is already the first batch. :rtype: :class:`Batch` instance or `None`.
def last(self): start = max(self.number - 1, 0) * self.size return Batch(start, self.size, self.total_size)
Returns the last batch for the batched sequence. :rtype: :class:`Batch` instance.
def number(self): return int(math.ceil(self.total_size / float(self.size)))
Returns the number of batches the batched sequence contains. :rtype: integer.
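A worked example, assuming a `Batch(start, size, total_size)` constructor and `number` exposed as a property, as the methods above suggest:

b = Batch(0, 3, 10)
b.number                  # -> 4, i.e. ceil(10 / 3)
b.next().next().next()    # -> Batch(9, 3, 10); one more next() returns None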
def watermark(url, args=''): # initialize some variables args = args.split(',') params = dict( name=args.pop(0), opacity=0.5, tile=False, scale=1.0, greyscale=False, rotation=0, position=None, quality=QUALITY, obscure=OBSCURE_ORIGINAL, random_position_once=RANDOM_POSITION_ONCE, ) params['url'] = unquote(url) # iterate over all parameters to see what we need to do for arg in args: key, value = arg.split('=') key, value = key.strip(), value.strip() if key == 'position': params['position'] = value elif key == 'opacity': params['opacity'] = utils._percent(value) elif key == 'tile': params['tile'] = bool(int(value)) elif key == 'scale': params['scale'] = value elif key == 'greyscale': params['greyscale'] = bool(int(value)) elif key == 'rotation': params['rotation'] = value elif key == 'quality': params['quality'] = int(value) elif key == 'obscure': params['obscure'] = bool(int(value)) elif key == 'random_position_once': params['random_position_once'] = bool(int(value)) return Watermarker()(**params)
Returns the URL to a watermarked copy of the image specified.
def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT): if url_path.startswith(settings.MEDIA_URL): url_path = url_path[len(settings.MEDIA_URL):] # strip media root url return os.path.normpath(os.path.join(basedir, url2pathname(url_path)))
Makes a filesystem path from the specified URL path
def generate_filename(self, mark, **kwargs): kwargs = kwargs.copy() kwargs['opacity'] = int(kwargs['opacity'] * 100) kwargs['st_mtime'] = kwargs['fstat'].st_mtime kwargs['st_size'] = kwargs['fstat'].st_size params = [ '%(original_basename)s', 'wm', 'w%(watermark)i', 'o%(opacity)i', 'gs%(greyscale)i', 'r%(rotation)i', 'fm%(st_mtime)i', 'fz%(st_size)i', 'p%(position)s', ] scale = kwargs.get('scale', None) if scale and scale != mark.size: params.append('_s%i' % (float(kwargs['scale'][0]) / mark.size[0] * 100)) if kwargs.get('tile', None): params.append('_tiled') # make thumbnail filename filename = '%s%s' % ('_'.join(params), kwargs['ext']) return filename % kwargs
Comes up with a good filename for the watermarked image
def get_url_path(self, basedir, original_basename, ext, name, obscure=True): try: hash = hashlib.sha1(smart_str(name)).hexdigest() except TypeError: hash = hashlib.sha1(smart_str(name).encode('utf-8')).hexdigest() # figure out where the watermark would be saved on the filesystem if obscure is True: logger.debug('Obscuring original image name: %s => %s' % (name, hash)) url_path = os.path.join(basedir, hash + ext) else: logger.debug('Not obscuring original image name.') url_path = os.path.join(basedir, hash, original_basename + ext) # make sure the destination directory exists try: fpath = self._get_filesystem_path(url_path) os.makedirs(os.path.dirname(fpath)) except OSError as e: if e.errno == errno.EEXIST: pass # not to worry, directory exists else: logger.error('Error creating path: %s' % traceback.format_exc()) raise else: logger.debug('Created directory: %s' % os.path.dirname(fpath)) return url_path
Determines an appropriate watermark path
def create_watermark(self, target, mark, fpath, quality=QUALITY, **kwargs): im = utils.watermark(target, mark, **kwargs) im.save(fpath, quality=quality) return im
Create the watermarked image on the filesystem
def _val(var, is_percent=False): try: if is_percent: var = float(int(var.strip('%')) / 100.0) else: var = int(var) except ValueError: raise ValueError('invalid watermark parameter: ' + var) return var
Tries to determine the appropriate value of a particular variable that is passed in. If the value is supposed to be a percentage, a whole integer will be sought after and then turned into a floating point number between 0 and 1. If the value is supposed to be an integer, the variable is cast into an integer.
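An illustration of both branches:

_val('30')                     # -> 30
_val('75%', is_percent=True)   # -> 0.75
_val('abc')                    # raises ValueError: invalid watermark parameter: abc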
def reduce_opacity(img, opacity): assert opacity >= 0 and opacity <= 1 if img.mode != 'RGBA': img = img.convert('RGBA') else: img = img.copy() alpha = img.split()[3] alpha = ImageEnhance.Brightness(alpha).enhance(opacity) img.putalpha(alpha) return img
Returns an image with reduced opacity.
def determine_scale(scale, img, mark): if scale: try: scale = float(scale) except (ValueError, TypeError): pass if isinstance(scale, six.string_types) and scale.upper() == 'F': # scale watermark to full, but preserve the aspect ratio scale = min( float(img.size[0]) / mark.size[0], float(img.size[1]) / mark.size[1] ) elif isinstance(scale, six.string_types) and scale.upper() == 'R': # scale watermark to % of source image and preserve the aspect ratio scale = min( float(img.size[0]) / mark.size[0], float(img.size[1]) / mark.size[1] ) / 100 * settings.WATERMARK_PERCENTAGE elif type(scale) not in (float, int): raise ValueError('Invalid scale value "%s"! Valid values are "F" ' 'for ratio-preserving scaling, "R%%" for percentage aspect ' 'ratio of source image and floating-point numbers and ' 'integers greater than 0.' % scale) # determine the new width and height w = int(mark.size[0] * float(scale)) h = int(mark.size[1] * float(scale)) # apply the new width and height, and return the new `mark` return (w, h) else: return mark.size
Scales an image using a specified ratio, 'F' or 'R'. If `scale` is 'F', the image is scaled to be as big as possible to fit in `img` without falling off the edges. If `scale` is 'R', the watermark is resized to `settings.WATERMARK_PERCENTAGE` percent of that full-fit size. Returns the scaled size for `mark`.
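A few worked calls, assuming an 800x600 `img` and a 200x100 `mark` (PIL-style `.size` tuples):

determine_scale('F', img, mark)    # min(800/200, 600/100) = 4.0 -> (800, 400)
determine_scale(0.5, img, mark)    # -> (100, 50)
determine_scale(None, img, mark)   # falsy scale -> mark.size unchanged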
def determine_rotation(rotation, mark): if isinstance(rotation, six.string_types) and rotation.lower() == 'r': rotation = random.randint(0, 359) else: rotation = _int(rotation) return rotation
Determines the number of degrees to rotate the watermark image.
def determine_position(position, img, mark): left = top = 0 max_left = max(img.size[0] - mark.size[0], 0) max_top = max(img.size[1] - mark.size[1], 0) #Added a 10px margin from corners to apply watermark. margin = 10 if not position: position = 'r' if isinstance(position, tuple): left, top = position elif isinstance(position, six.string_types): position = position.lower() # corner positioning if position in ['tl', 'tr', 'br', 'bl']: if 't' in position: top = margin elif 'b' in position: top = max_top - margin if 'l' in position: left = margin elif 'r' in position: left = max_left - margin # center positioning elif position == 'c': left = int(max_left / 2) top = int(max_top / 2) # random positioning elif position == 'r': left = random.randint(0, max_left) top = random.randint(0, max_top) # relative or absolute positioning elif 'x' in position: left, top = position.split('x') if '%' in left: left = max_left * _percent(left) else: left = _int(left) if '%' in top: top = max_top * _percent(top) else: top = _int(top) return int(left), int(top)
Options: TL: top-left TR: top-right BR: bottom-right BL: bottom-left C: centered R: random X%xY%: relative positioning on both the X and Y axes X%xY: relative positioning on the X axis and absolute positioning on the Y axis XxY%: absolute positioning on the X axis and relative positioning on the Y axis XxY: absolute positioning on both the X and Y axes
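A few worked placements, assuming an 800x600 image and a 100x100 mark, so max_left=700 and max_top=500, with the 10px corner margin:

determine_position('br', img, mark)       # -> (690, 490)
determine_position('c', img, mark)        # -> (350, 250)
determine_position('50%x25', img, mark)   # -> (350, 25): relative X, absolute Y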
def parsed_file(config_file): parser = ConfigParser(allow_no_value=True) parser.readfp(config_file) return parser
Parse an ini-style config file.
def commands(config, names): commands = {cmd: Command(**dict((minus_to_underscore(k), v) for k, v in config.items(cmd))) for cmd in config.sections() if cmd != 'packages'} try: return tuple(commands[x] for x in names) except KeyError as e: raise RuntimeError( 'Section [commands] in the config file does not contain the ' 'key {.args[0]!r} you requested to execute.'.format(e))
Return the list of commands to run.
def project_path(*names): return os.path.join(os.path.dirname(__file__), *names)
Path to a file in the project.
def get_osa_commit(repo, ref, rpc_product=None): osa_differ.checkout(repo, ref) functions_path = os.path.join(repo.working_tree_dir, 'scripts/functions.sh') release_path = os.path.join(repo.working_tree_dir, 'playbooks/vars/rpc-release.yml') if os.path.exists(release_path): with open(release_path) as f: rpc_release_data = yaml.safe_load(f.read()) rpc_product_releases = rpc_release_data['rpc_product_releases'] release_data = rpc_product_releases[rpc_product] return release_data['osa_release'] elif repo.submodules['openstack-ansible']: return repo.submodules['openstack-ansible'].hexsha elif os.path.exists(functions_path): # This branch doesn't use a submodule for OSA # Pull the SHA out of functions.sh quoted_re = re.compile('OSA_RELEASE:-?"?([^"}]+)["}]') with open(functions_path, "r") as funcs: for line in funcs.readlines(): match = quoted_re.search(line) if match: return match.groups()[0] else: raise SHANotFound( ("Cannot find OSA SHA in submodule or " "script: {}".format(functions_path))) else: raise SHANotFound('No OSA SHA was able to be derived.')
Get the OSA SHA referenced by an RPCO repo.
def validate_rpc_sha(repo_dir, commit): # Is the commit valid? Just in case the commit is a # PR ref, we try both the ref given and the ref prepended # with the remote 'origin'. try: osa_differ.validate_commits(repo_dir, [commit]) except exceptions.InvalidCommitException: log.debug("The reference {c} cannot be found. Prepending " "origin remote and retrying.".format(c=commit)) commit = 'origin/' + commit osa_differ.validate_commits(repo_dir, [commit]) return commit
Validate/update a SHA given for the rpc-openstack repo.
def make_rpc_report(repo_dir, old_commit, new_commit, args): # Do we have a valid commit range? # NOTE: # An exception is thrown by osa_differ if these two commits # are the same, but it is sometimes necessary to compare # two RPC tags that have the same OSA SHA. For example, # comparing two tags that only have differences between the # two RPCO commits, but no differences between the OSA SHAs # that correspond to those two commits. # To handle this case, the exception will be caught and flow # of execution will continue normally. try: osa_differ.validate_commit_range(repo_dir, old_commit, new_commit) except exceptions.InvalidCommitRangeException: pass # Get the commits in the range commits = osa_differ.get_commits(repo_dir, old_commit, new_commit) # Start off our report with a header and our OpenStack-Ansible commits. template_vars = { 'args': args, 'repo': 'rpc-openstack', 'commits': commits, 'commit_base_url': osa_differ.get_commit_url(args.rpc_repo_url), 'old_sha': old_commit, 'new_sha': new_commit } return render_template('offline-header.j2', template_vars)
Create initial RST report header for OpenStack-Ansible.
def parse_arguments(): parser = create_parser() args = parser.parse_args() if not args.role_requirements_old_commit: args.role_requirements_old_commit = args.role_requirements if not args.rpc_product_old_commit: args.rpc_product_old_commit = args.rpc_product return args
Parse arguments.
def publish_report(report, args, old_commit, new_commit): # Print the report to stdout unless the user specified --quiet. output = "" if not args.quiet and not args.gist and not args.file: return report if args.gist: gist_url = post_gist(report, old_commit, new_commit) output += "\nReport posted to GitHub Gist: {0}".format(gist_url) if args.file is not None: with open(args.file, 'w') as f: f.write(report.encode('utf-8')) output += "\nReport written to file: {0}".format(args.file) return output
Publish the RST report based on the user request.
def main(raw_args=None): parser = argparse.ArgumentParser( description="poor man's integration testing") parser.add_argument( 'cmds', metavar='cmd', default=['test'], nargs='*', help='Run command(s) defined in the configuration file. Each command ' 'is run on each package before proceeding with the next command. ' '(default: "test")') parser.add_argument( '-c', '--config', dest='file', type=argparse.FileType('r'), default='toll.ini', help='ini-style file to read the configuration from') parser.add_argument( '--start-at', dest='start_at', type=str, default='', help='Skip over the packages in the config file listed before this' ' one. (It does a substring match to find the first package.)') args = parser.parse_args(raw_args) config_file = config.parsed_file(args.file) commands = config.commands(config_file, args.cmds) packages = config.packages(config_file) runner = Runner(commands, packages, start_at=args.start_at) return runner()
Console script entry point.
def remove(self, list): xml = SP.DeleteList(SP.listName(list.id)) self.opener.post_soap(LIST_WEBSERVICE, xml, soapaction='http://schemas.microsoft.com/sharepoint/soap/DeleteList') self.all_lists.remove(list)
Removes a list from the site.
def create(self, name, description='', template=100): try: template = int(template) except ValueError: template = LIST_TEMPLATES[template] if name in self: raise ValueError("List already exists: '{0}'".format(name)) if uuid_re.match(name): raise ValueError("Cannot create a list with a UUID as a name") xml = SP.AddList(SP.listName(name), SP.description(description), SP.templateID(text_type(template))) result = self.opener.post_soap(LIST_WEBSERVICE, xml, soapaction='http://schemas.microsoft.com/sharepoint/soap/AddList') list_element = result.xpath('sp:AddListResult/sp:List', namespaces=namespaces)[0] self._all_lists.append(SharePointList(self.opener, self, list_element))
Creates a new list in the site.
def Row(self): if not hasattr(self, '_row_class'): attrs = {'fields': self.fields, 'list': self, 'opener': self.opener} for field in self.fields.values(): attrs[field.name] = field.descriptor self._row_class = type('SharePointListRow', (SharePointListRow,), attrs) return self._row_class
The class for a row in this list.
def append(self, row): if isinstance(row, dict): row = self.Row(row) elif isinstance(row, self.Row): pass elif isinstance(row, SharePointListRow): raise TypeError("row must be a dict or an instance of SharePointList.Row, not SharePointListRow") else: raise TypeError("row must be a dict or an instance of SharePointList.Row") self.rows # Make sure self._rows exists. self._rows.append(row) return row
Appends a row to the list. Takes a dictionary, returns a row.
def remove(self, row): self._rows.remove(row) self._deleted_rows.add(row)
Removes the row from the list.
def get_batch_method(self): if not self._changed: return None batch_method = E.Method(Cmd='Update' if self.id else 'New') batch_method.append(E.Field(text_type(self.id) if self.id else 'New', Name='ID')) for field in self.fields.values(): if field.name in self._changed: value = field.unparse(self._data[field.name] or '') batch_method.append(E.Field(value, Name=field.name)) return batch_method
Returns a change batch for SharePoint's UpdateListItems operation.
def convert_to_python(self, xmlrpc=None): if xmlrpc: return xmlrpc.get(self.name, self.default) elif self.default: return self.default else: return None
Extracts a value for the field from an XML-RPC response.
def get_outputs(self, input_value): output_value = self.convert_to_xmlrpc(input_value) output = {} for name in self.output_names: output[name] = output_value return output
Generate a set of output values for a given input.
def struct(self): data = {} for var, fmap in self._def.items(): if hasattr(self, var): data.update(fmap.get_outputs(getattr(self, var))) return data
XML-RPC-friendly representation of the current object state
def get_args(self, client): default_args = self.default_args(client) if self.method_args or self.optional_args: optional_args = getattr(self, 'optional_args', tuple()) args = [] for arg in (self.method_args + optional_args): if hasattr(self, arg): obj = getattr(self, arg) if hasattr(obj, 'struct'): args.append(obj.struct) else: args.append(obj) args = list(default_args) + args else: args = default_args return args
Builds final set of XML-RPC method arguments based on the method's arguments, any default arguments, and their defined respective ordering.
def process_result(self, raw_result): if self.results_class and raw_result: if isinstance(raw_result, dict_type): return self.results_class(raw_result) elif isinstance(raw_result, collections.Iterable): return [self.results_class(result) for result in raw_result] return raw_result
Performs actions on the raw result from the XML-RPC response. If a `results_class` is defined, the response will be converted into one or more object instances of that class.
def parse(self, text): '''Returns a list of addresses found in text together with parsed address parts ''' results = [] if isinstance(text, str): if six.PY2: text = unicode(text, 'utf-8') self.clean_text = self._normalize_string(text) # get addresses addresses = set(self._get_addresses(self.clean_text)) if addresses: # append parsed address info results = list(map(self._parse_address, addresses)) return results
Returns a list of addresses found in text together with parsed address parts
def _parse_address(self, address_string): '''Parses address into parts''' match = utils.match(self.rules, address_string, flags=re.VERBOSE | re.U) if match: match_as_dict = match.groupdict() match_as_dict.update({'country_id': self.country}) # combine results cleaned_dict = self._combine_results(match_as_dict) # create object containing results return address.Address(**cleaned_dict) return False
Parses address into parts
def _combine_results(self, match_as_dict): '''Combine results from different parsed parts: we look for non-empty results in values like 'postal_code_b' or 'postal_code_c' and store them as main value. So 'postal_code_b':'123456' becomes: 'postal_code' :'123456' ''' keys = [] vals = [] for k, v in six.iteritems(match_as_dict): if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m': if v: # strip last 2 chars: '..._b' -> '...' keys.append(k[:-2]) vals.append(v) else: if k not in keys: keys.append(k) vals.append(v) return dict(zip(keys, vals))
Combine results from different parsed parts: we look for non-empty results in values like 'postal_code_b' or 'postal_code_c' and store them as main value. So 'postal_code_b':'123456' becomes: 'postal_code' :'123456'
def _normalize_string(self, text): '''Prepares incoming text for parsing: removes excessive spaces, tabs, newlines, etc. ''' conversion = { # newlines '\r?\n': ' ', # replace excessive empty spaces '\s+': ' ', # convert all types of hyphens/dashes to a # simple old-school dash # from http://utf8-chartable.de/unicode-utf8-table.pl? # start=8192&number=128&utf8=string-literal '‐': '-', '‑': '-', '‒': '-', '–': '-', '—': '-', '―': '-', } for find, replace in six.iteritems(conversion): text = re.sub(find, replace, text, flags=re.UNICODE) return text
Prepares incoming text for parsing: removes excessive spaces, tabs, newlines, etc.
def _get_addresses(self, text): '''Returns a list of addresses found in text''' # find addresses addresses = [] matches = utils.findall( self.rules, text, flags=re.VERBOSE | re.U) if matches: for match in matches: addresses.append(match[0].strip()) return addresses
Returns a list of addresses found in text
def parse(some_text, **kwargs): ap = parser.AddressParser(**kwargs) return ap.parse(some_text)
Creates an AddressParser with the given keyword arguments and returns the list of Address objects found in the text
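An assumed call shape, mirroring the parser's keyword arguments (the country code is whatever the loaded rule set expects):

addresses = parse('225 E. John Carpenter Freeway, Suite 1500, Irving, Texas 75062', country='US')
for addr in addresses:
    print(addr)   # each item is an address.Address instance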
def setAttribute(values, value): if isinstance(value, int): values.add().int32_value = value elif isinstance(value, float): values.add().double_value = value elif isinstance(value, long): values.add().int64_value = value elif isinstance(value, str): values.add().string_value = value elif isinstance(value, bool): values.add().bool_value = value elif isinstance(value, (list, tuple, array.array)): for v in value: setAttribute(values, v) elif isinstance(value, dict): for key in value: setAttribute( values.add().attributes.attr[key].values, value[key]) else: values.add().string_value = str(value)
Takes the values of an attribute value list and attempts to append attributes of the proper type, inferred from their Python type.
def deepSetAttr(obj, path, val): first, _, rest = path.rpartition('.') return setattr(deepGetAttr(obj, first) if first else obj, rest, val)
Sets a deep attribute on an object by resolving a dot-delimited path. If path does not exist an `AttributeError` will be raised.
def encodeValue(value): if isinstance(value, (list, tuple)): return [common.AttributeValue(string_value=str(v)) for v in value] else: return [common.AttributeValue(string_value=str(value))]
TODO
def convertDatetime(t): epoch = datetime.datetime.utcfromtimestamp(0) delta = t - epoch millis = delta.total_seconds() * 1000 return int(millis)
Converts the specified datetime object into its appropriate protocol value. This is the number of milliseconds from the epoch.
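A worked example:

import datetime
convertDatetime(datetime.datetime(1970, 1, 2))            # -> 86400000 (one day in ms)
convertDatetime(datetime.datetime(1970, 1, 1, 0, 0, 1))   # -> 1000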
def getValueFromValue(value): if type(value) != common.AttributeValue: raise TypeError( "Expected an AttributeValue, but got {}".format(type(value))) if value.WhichOneof("value") is None: raise AttributeError("Nothing set for {}".format(value)) return getattr(value, value.WhichOneof("value"))
Extract the currently set field from a Value structure
def toJson(protoObject, indent=None): # Using the internal method because this way we can reformat the JSON js = json_format.MessageToDict(protoObject, False) return json.dumps(js, indent=indent)
Serialises a protobuf object as json
def getProtocolClasses(superclass=message.Message): # We keep a manual list of the superclasses that we define here # so we can filter them out when we're getting the protocol # classes. superclasses = set([message.Message]) thisModule = sys.modules[__name__] subclasses = [] for name, class_ in inspect.getmembers(thisModule): if ((inspect.isclass(class_) and issubclass(class_, superclass) and class_ not in superclasses)): subclasses.append(class_) return subclasses
Returns all the protocol classes that are subclasses of the specified superclass. Only 'leaf' classes are returned, corresponding directly to the classes defined in the protocol.
def runCommandSplits(splits, silent=False, shell=False): try: if silent: with open(os.devnull, 'w') as devnull: subprocess.check_call( splits, stdout=devnull, stderr=devnull, shell=shell) else: subprocess.check_call(splits, shell=shell) except OSError as exception: if exception.errno == 2: # cmd not found raise Exception( "Can't find command while trying to run {}".format(splits)) else: raise
Run a shell command given the command's parsed command line
def runCommand(command, silent=False, shell=False): splits = shlex.split(command) runCommandSplits(splits, silent=silent, shell=shell)
Run a shell command
def _createSchemaFiles(self, destPath, schemasPath): # Create the target directory hierarchy, if necessary ga4ghPath = os.path.join(destPath, 'ga4gh') if not os.path.exists(ga4ghPath): os.mkdir(ga4ghPath) ga4ghSchemasPath = os.path.join(ga4ghPath, 'schemas') if not os.path.exists(ga4ghSchemasPath): os.mkdir(ga4ghSchemasPath) ga4ghSchemasGa4ghPath = os.path.join(ga4ghSchemasPath, 'ga4gh') if not os.path.exists(ga4ghSchemasGa4ghPath): os.mkdir(ga4ghSchemasGa4ghPath) ga4ghSchemasGooglePath = os.path.join(ga4ghSchemasPath, 'google') if not os.path.exists(ga4ghSchemasGooglePath): os.mkdir(ga4ghSchemasGooglePath) ga4ghSchemasGoogleApiPath = os.path.join( ga4ghSchemasGooglePath, 'api') if not os.path.exists(ga4ghSchemasGoogleApiPath): os.mkdir(ga4ghSchemasGoogleApiPath) # rewrite the proto files to the destination for root, dirs, files in os.walk(schemasPath): for protoFilePath in fnmatch.filter(files, '*.proto'): src = os.path.join(root, protoFilePath) dst = os.path.join( ga4ghSchemasPath, os.path.relpath(root, schemasPath), protoFilePath) self._copySchemaFile(src, dst)
Create a hierarchy of proto files in a destination directory, copied from the schemasPath hierarchy
def _doLineReplacements(self, line): # ga4gh packages packageString = 'package ga4gh;' if packageString in line: return line.replace( packageString, 'package ga4gh.schemas.ga4gh;') importString = 'import "ga4gh/' if importString in line: return line.replace( importString, 'import "ga4gh/schemas/ga4gh/') # google packages googlePackageString = 'package google.api;' if googlePackageString in line: return line.replace( googlePackageString, 'package ga4gh.schemas.google.api;') googleImportString = 'import "google/api/' if googleImportString in line: return line.replace( googleImportString, 'import "ga4gh/schemas/google/api/') optionString = 'option (google.api.http)' if optionString in line: return line.replace( optionString, 'option (.ga4gh.schemas.google.api.http)') return line
Given a line of a proto file, replace the line with one that is appropriate for the hierarchy that we want to compile
def _copySchemaFile(self, src, dst): with open(src) as srcFile, open(dst, 'w') as dstFile: srcLines = srcFile.readlines() for srcLine in srcLines: toWrite = self._doLineReplacements(srcLine) dstFile.write(toWrite)
Copy a proto file to the temporary directory, with appropriate line replacements
def convert_protodef_to_editable(proto): class Editable(object): def __init__(self, prot): self.kind = type(prot) self.name = prot.name self.comment = "" self.options = dict([(key.name, value) for (key, value) in prot.options.ListFields()]) if isinstance(prot, EnumDescriptorProto): self.value = [convert_protodef_to_editable(x) for x in prot.value] elif isinstance(prot, DescriptorProto): self.field = [convert_protodef_to_editable(x) for x in prot.field] self.enum_type = [convert_protodef_to_editable(x) for x in prot.enum_type] self.nested_type = prot.nested_type self.oneof_decl = prot.oneof_decl elif isinstance(prot, EnumValueDescriptorProto): self.number = prot.number elif isinstance(prot, FieldDescriptorProto): if prot.type in [11, 14]: self.ref_type = prot.type_name[1:] self.type = prot.type self.label = prot.label elif isinstance(prot, ServiceDescriptorProto): self.method = [convert_protodef_to_editable(x) for x in prot.method] elif isinstance(prot, MethodDescriptorProto): self.input_type = prot.input_type self.output_type = prot.output_type else: raise Exception(type(prot)) return Editable(proto)
Protobuf objects can't have arbitrary fields added, and we need to attach comments to them later on, so we instead make "Editable" objects that can do so
def type_to_string(f, map_types): if f.type in [1]: return "double" elif f.type in [2]: return "float" elif f.type in [3]: return "long" elif f.type in [4]: return "uint64" elif f.type in [5]: return "integer" elif f.type in [6]: return "fixed64" elif f.type in [7]: return "fixed32" elif f.type in [8]: return "boolean" elif f.type in [9]: return "string" # missing type 10 - Group elif f.type in [11, 14]: ref_name = f.ref_type if ref_name in map_types: ref_fields = map_types[ref_name] return { "type": "map", "key": " %s "% type_to_string(ref_fields["key"], map_types), "value": " %s "% type_to_string(ref_fields["value"], map_types) } else: kind = ":protobuf:message:`%s`" % simplify_name(f.ref_type) if f.label == 3: # LABEL_REPEATED return "list of " + kind else: return kind elif f.type in [12]: return "bytes" elif f.type in [13]: return "uint32" elif f.type in [15]: return "sfixed32" elif f.type in [16]: return "sfixed64" elif f.type in [17]: return "sint32" elif f.type in [18]: return "sint64" else: raise Exception(f.type)
Convert type info to pretty names, based on numbers from FieldDescriptorProto https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor.pb
def haversine(point1, point2, unit='km'): # mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius AVG_EARTH_RADIUS_KM = 6371.0088 # Units values taken from http://www.unitconversion.org/unit_converter/length.html conversions = {'km': 1, 'm': 1000, 'mi': 0.621371192, 'nmi': 0.539956803, 'ft': 3280.839895013, 'in': 39370.078740158} # get earth radius in required units avg_earth_radius = AVG_EARTH_RADIUS_KM * conversions[unit] # unpack latitude/longitude lat1, lng1 = point1 lat2, lng2 = point2 # convert all latitudes/longitudes from decimal degrees to radians lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2)) # calculate haversine lat = lat2 - lat1 lng = lng2 - lng1 d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2 return 2 * avg_earth_radius * asin(sqrt(d))
Calculate the great-circle distance between two points on the Earth's surface. :input: two 2-tuples, containing the latitude and longitude of each point in decimal degrees. Keyword arguments: unit -- a string containing the initials of a unit of measurement (i.e. miles = mi) default 'km' (kilometers). Example: haversine((45.7597, 4.8422), (48.8567, 2.3508)) :output: Returns the distance between the two points. The default returned unit is kilometers. The default unit can be changed by setting the unit parameter to a string containing the initials of the desired unit. Other available units are miles (mi), nautical miles (nmi), meters (m), feet (ft) and inches (in).
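The docstring's example, spelled out with a couple of unit conversions (values rounded):

lyon, paris = (45.7597, 4.8422), (48.8567, 2.3508)
haversine(lyon, paris)              # ~392.2 (km, the default)
haversine(lyon, paris, unit='mi')   # ~243.7
haversine(lyon, paris, unit='m')    # ~392217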
def main(): logging.basicConfig(level=logging.INFO) run_metrics = py_interop_run_metrics.run_metrics() summary = py_interop_summary.run_summary() valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0) py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load) for run_folder_path in sys.argv[1:]: run_folder = os.path.basename(run_folder_path) try: run_metrics.read(run_folder_path, valid_to_load) except Exception as ex: logging.warn("Skipping - cannot read RunInfo.xml: %s - %s"%(run_folder, str(ex))) continue py_interop_summary.summarize_run_metrics(run_metrics, summary) error_rate_read_lane_surface = numpy.zeros((summary.size(), summary.lane_count(), summary.surface_count())) for read_index in xrange(summary.size()): for lane_index in xrange(summary.lane_count()): for surface_index in xrange(summary.surface_count()): error_rate_read_lane_surface[read_index, lane_index, surface_index] = \ summary.at(read_index).at(lane_index).at(surface_index).error_rate().mean() logging.info("Run Folder: "+run_folder) for read_index in xrange(summary.size()): read_summary = summary.at(read_index) logging.info("Read "+str(read_summary.read().number())+" - Top Surface Mean Error: "+str(error_rate_read_lane_surface[read_index, :, 0].mean()))
Retrieve run folder paths from the command line, ensure only the metrics required for the summary are loaded, load the run metrics, calculate the summary metrics, and display the error rate by lane and read.
def login(self, user, passwd): '''Logs the user into SecurityCenter and stores the needed token and cookies.''' resp = self.post('token', json={'username': user, 'password': passwd}) self._token = resp.json()['response']['token']
Logs the user into SecurityCenter and stores the needed token and cookies.
def update(sc, filename, asset_id): ''' Updates a DNS Asset List with the contents of the filename. The assumed format of the file is 1 entry per line. This function will convert the file contents into an array of entries and then upload that array into SecurityCenter. ''' addresses = [] with open(filename) as hostfile: for line in hostfile.readlines(): addresses.append(line.strip('\n')) sc.asset_update(asset_id, dns=addresses)
Updates a DNS Asset List with the contents of the filename. The assumed format of the file is 1 entry per line. This function will convert the file contents into an array of entries and then upload that array into SecurityCenter.
def generate_html_report(base_path, asset_id): ''' Generates the HTML report and dumps it into the specified filename ''' jenv = Environment(loader=PackageLoader('swchange', 'templates')) s = Session() #hosts = s.query(Host).filter_by(asset_id=asset_id).all() asset = s.query(AssetList).filter_by(id=asset_id).first() if not asset: print 'Invalid Asset ID (%s)!' % asset_id return filename = os.path.join(base_path, '%s-INV-CHANGE-%s.html' % ( asset.name, datetime.now().strftime('%Y-%m-%d.%H.%M.%S')) ) print 'Generating Report : %s' % filename with open(filename, 'wb') as report: report.write(jenv.get_template('layout.html').render( asset=asset, current_date=datetime.now() ))
Generates the HTML report and dumps it into the specified filename
def gen_csv(sc, filename): '''Generate a CSV of software package names and counts.''' # First thing we need to do is initialize the csvfile and build the header # for the file. datafile = open(filename, 'wb') csvfile = csv.writer(datafile) csvfile.writerow(['Software Package Name', 'Count']) debug.write('Generating %s: ' % filename) # Next we will run the Security Center query. Because this could be a # potentially very large dataset that is returned, we don't want to run out # of memory. To get around this, we will pass the query function the writer # function with the appropriate fields so that it is parsed inline. fparams = {'fobj': csvfile} sc.query('listsoftware', func=writer, func_params=fparams) debug.write('\n') # Lastly we need to close the datafile. datafile.close()
Generate a CSV of software package names and counts.
def post(self, path, **kwargs): '''Calls the specified path with the POST method''' resp = self._session.post(self._url(path), **self._builder(**kwargs)) if 'stream' in kwargs: return resp else: return self._resp_error_check(resp)
Calls the specified path with the POST method
def import_repo(self, repo_id, fileobj): ''' Imports a repository package using the repository ID specified. ''' # Step 1, lets upload the file filename = self.upload(fileobj).json()['response']['filename'] # Step 2, lets tell SecurityCenter what to do with the file return self.post('repository/{}/import'.format(repo_id), json={'file': filename})
Imports a repository package using the repository ID specified.
def _revint(self, version): ''' Internal function to convert a version string to an integer. ''' intrev = 0 vsplit = version.split('.') for c in range(len(vsplit)): item = int(vsplit[c]) * (10 ** (((len(vsplit) - c - 1) * 2))) intrev += item return intrev
Internal function to convert a version string to an integer.