code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def log(context):
    """Show repository history as a graph (git log --graph / hg log -G)."""
    context.obj.find_repo_type()
    if context.obj.vc_name == 'git':
        # Compact colourised one-line-per-commit format.
        fmt = ("--pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset "
               "%s %Cgreen(%cr) %C(bold blue)<%an>%Creset'")
        context.obj.call(
            ['git', 'log', '--graph', fmt, '--abbrev-commit', '--stat'])
    elif context.obj.vc_name == 'hg':
        template = (
            '"changeset: {rev}:{node|short} {tags}\n'
            ' summary: {desc|firstline|fill68|tabindent|tabindent}"')
        context.obj.call(['hg', 'log', '-G', '--template', template])
See history
def diff(context, file_name):
    """Show changes to ``file_name`` since the last check-in."""
    repo = context.obj
    repo.find_repo_type()
    if repo.vc_name == 'git':
        repo.call(['git', 'diff', '--color-words', '--ignore-space-change',
                   file_name])
    elif repo.vc_name == 'hg':
        repo.call(['hg', 'diff', file_name])
See changes that occurred since last check in
def call(self, args, devnull=False):
    """Call other processes.

    args - list of command args
    devnull - whether to pipe stdout to /dev/null (or equivalent)
    """
    if self.debug:
        # In debug mode, show the exact command line and confirm before running.
        click.echo(subprocess.list2cmdline(args))
        click.confirm('Continue?', default=True, abort=True)
    try:
        kwargs = {}
        if devnull:
            # Pipe to /dev/null (or equivalent).
            kwargs['stderr'] = subprocess.STDOUT
            kwargs['stdout'] = self.FNULL
        ret_code = subprocess.call(args, **kwargs)
    except subprocess.CalledProcessError:
        # NOTE(review): subprocess.call() does not raise CalledProcessError
        # (only check_call/check_output do), so this branch appears
        # unreachable — confirm before relying on the False return.
        return False
    return ret_code
Call other processes. args - list of command args devnull - whether to pipe stdout to /dev/null (or equivalent)
def find_repo_type(self):
    """Check for git or hg repository.

    Sets ``self.vc_name`` to ``'git'`` or ``'hg'``; exits with status 1
    when the current directory belongs to neither kind of repository.
    """
    is_git = self.call(['git', 'rev-parse', '--is-inside-work-tree'],
                       devnull=True)
    if is_git != 0:
        if self.debug:
            click.echo('not git')
        is_hg = self.call(['hg', '-q', 'stat'], devnull=True)
        if is_hg != 0:
            if self.debug:
                click.echo('not hg')
            exit(1)
        else:
            self.vc_name = 'hg'
    else:
        # BUG FIX: the original only ever recorded 'hg', leaving vc_name
        # stale/unset when inside a git work tree.
        self.vc_name = 'git'
Check for git or hg repository
def main():
    """The main entry point of the program.

    Parses CLI arguments, collects documentation from the given path,
    renders it with the selected template, and writes the result either to
    stdout or to the requested output file.
    """
    # Parse command line arguments
    argp = _cli_argument_parser()
    args = argp.parse_args()
    # setup logging
    logging.basicConfig(
        level=args.loglevel,
        format="%(levelname)s %(message)s")
    console.display("Collecting documentation from files")
    collector_metrics = metrics.Metrics()
    docs = collector.parse(args.path, args.trace_parser,
                           metrics=collector_metrics)
    collector_metrics.display()
    console.display("Rendering documentation")
    try:
        if args.output:
            # Pick a template that matches the output file's extension;
            # default to JSON when writing to stdout.
            template = renderer.template_from_filename(args.output)
        else:
            template = "json"
        out = renderer.render(docs, template)
    except ValueError as err:
        logging.error(err)
        sys.exit(1)
    except TemplateNotFound as err:
        logging.error(
            "Template `{}` not found. Available templates are: {}".format(
                err.name, renderer.list_templates()))
        sys.exit(1)
    if not args.output:
        print(out)
    else:
        console.display("Writing documentation to", args.output)
        with io.open(args.output, "w", encoding="utf-8") as fp:
            fp.write(out)
The main entry point of the program
def process_ioc(args):
    """Process actions related to the IOC switch."""
    client = IndicatorClient.from_config()
    client.set_debug(True)
    if args.get:
        return client.get_indicators()
    if args.single:
        return client.add_indicators(indicators=[args.single],
                                     private=args.private, tags=args.tags)
    if not os.path.isfile(args.file):
        raise Exception("File path isn't valid!")
    with open(args.file, 'r') as handle:
        # Keep every non-blank line, stripped of surrounding whitespace.
        indicators = [stripped
                      for stripped in (raw.strip() for raw in handle)
                      if stripped != '']
    return client.add_indicators(indicators=indicators,
                                 private=args.private, tags=args.tags)
Process actions related to the IOC switch.
def process_events(args):
    """Process actions related to events switch."""
    client = EventsClient.from_config()
    client.set_debug(True)
    if args.get:
        return client.get_events()
    if args.flush:
        return client.flush_events()
    # Mirrors the original: with neither flag set, `response` was never
    # bound and a NameError escaped to the caller.
    return response
Process actions related to events switch.
def main():
    """Run the code.

    Build the Blockade Analyst Bench CLI (ioc/events sub-commands), validate
    the flag combinations, dispatch to process_ioc/process_events, and print
    the response message.
    """
    parser = ArgumentParser(description="Blockade Analyst Bench")
    subs = parser.add_subparsers(dest='cmd')
    ioc = subs.add_parser('ioc', help="Perform actions with IOCs")
    ioc.add_argument('--single', '-s', help="Send a single IOC")
    ioc.add_argument('--file', '-f', help="Parse a file of IOCs")
    ioc.add_argument('--private', '-p', action="store_true",
                     help="Submit the IOCs to the node hashed, \
instead of in clear")
    ioc.add_argument('--tags', '-t',
                     help="Add a comma-separated list of tags to store \
with the indicators")
    ioc.add_argument('--get', '-g', action="store_true",
                     help="List indicators on the remote node")
    events = subs.add_parser('events', help="Perform actions with Events")
    events.add_argument('--get', '-g', action='store_true',
                        help="Get recent events")
    events.add_argument('--flush', '-f', action='store_true',
                        help="Flush all events from cloud node")
    args, unknown = parser.parse_known_args()
    try:
        if args.cmd == 'ioc':
            # --single and --file are mutually exclusive; at least one
            # action flag is required.
            if (args.single and args.file):
                raise Exception("Can't use single and file together!")
            if (not args.single and not args.file and not args.get):
                ioc.print_help()
                sys.exit(1)
            response = process_ioc(args)
        elif args.cmd == 'events':
            if (not args.get and not args.flush):
                events.print_help()
                sys.exit(1)
            response = process_events(args)
        else:
            parser.print_usage()
            sys.exit(1)
    except ValueError as e:
        parser.print_usage()
        sys.stderr.write('{}\n'.format(str(e)))
        sys.exit(1)
    print(response.get('message', ''))
Run the code.
def create_store_credit_transaction(cls, store_credit_transaction, **kwargs):
    """Create a new StoreCreditTransaction.

    Synchronous by default; pass async=True to get the request thread back.

    :param StoreCreditTransaction store_credit_transaction: attributes to create (required)
    :return: StoreCreditTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call; only the
    # helper's return value differs when async=True (thread vs. data).
    return cls._create_store_credit_transaction_with_http_info(
        store_credit_transaction, **kwargs)
Create StoreCreditTransaction Create a new StoreCreditTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_store_credit_transaction(store_credit_transaction, async=True) >>> result = thread.get() :param async bool :param StoreCreditTransaction store_credit_transaction: Attributes of storeCreditTransaction to create (required) :return: StoreCreditTransaction If the method is called asynchronously, returns the request thread.
def delete_store_credit_transaction_by_id(cls, store_credit_transaction_id, **kwargs):
    """Delete an instance of StoreCreditTransaction by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str store_credit_transaction_id: ID to delete (required)
    :return: None, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._delete_store_credit_transaction_by_id_with_http_info(
        store_credit_transaction_id, **kwargs)
Delete StoreCreditTransaction Delete an instance of StoreCreditTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_store_credit_transaction_by_id(store_credit_transaction_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_transaction_id: ID of storeCreditTransaction to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_store_credit_transaction_by_id(cls, store_credit_transaction_id, **kwargs):
    """Return a single StoreCreditTransaction by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str store_credit_transaction_id: ID to return (required)
    :return: StoreCreditTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._get_store_credit_transaction_by_id_with_http_info(
        store_credit_transaction_id, **kwargs)
Find StoreCreditTransaction Return single instance of StoreCreditTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_credit_transaction_by_id(store_credit_transaction_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_transaction_id: ID of storeCreditTransaction to return (required) :return: StoreCreditTransaction If the method is called asynchronously, returns the request thread.
def list_all_store_credit_transactions(cls, **kwargs):
    """Return a page of StoreCreditTransactions.

    Synchronous by default; pass async=True to get the request thread back.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[StoreCreditTransaction], or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._list_all_store_credit_transactions_with_http_info(**kwargs)
List StoreCreditTransactions Return a list of StoreCreditTransactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_store_credit_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StoreCreditTransaction] If the method is called asynchronously, returns the request thread.
def replace_store_credit_transaction_by_id(cls, store_credit_transaction_id, store_credit_transaction, **kwargs):
    """Replace all attributes of a StoreCreditTransaction.

    Synchronous by default; pass async=True to get the request thread back.

    :param str store_credit_transaction_id: ID to replace (required)
    :param StoreCreditTransaction store_credit_transaction: replacement attributes (required)
    :return: StoreCreditTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._replace_store_credit_transaction_by_id_with_http_info(
        store_credit_transaction_id, store_credit_transaction, **kwargs)
Replace StoreCreditTransaction Replace all attributes of StoreCreditTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_credit_transaction_by_id(store_credit_transaction_id, store_credit_transaction, async=True) >>> result = thread.get() :param async bool :param str store_credit_transaction_id: ID of storeCreditTransaction to replace (required) :param StoreCreditTransaction store_credit_transaction: Attributes of storeCreditTransaction to replace (required) :return: StoreCreditTransaction If the method is called asynchronously, returns the request thread.
def update_store_credit_transaction_by_id(cls, store_credit_transaction_id, store_credit_transaction, **kwargs):
    """Update attributes of a StoreCreditTransaction.

    Synchronous by default; pass async=True to get the request thread back.

    :param str store_credit_transaction_id: ID to update (required)
    :param StoreCreditTransaction store_credit_transaction: attributes to update (required)
    :return: StoreCreditTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._update_store_credit_transaction_by_id_with_http_info(
        store_credit_transaction_id, store_credit_transaction, **kwargs)
Update StoreCreditTransaction Update attributes of StoreCreditTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_store_credit_transaction_by_id(store_credit_transaction_id, store_credit_transaction, async=True) >>> result = thread.get() :param async bool :param str store_credit_transaction_id: ID of storeCreditTransaction to update. (required) :param StoreCreditTransaction store_credit_transaction: Attributes of storeCreditTransaction to update. (required) :return: StoreCreditTransaction If the method is called asynchronously, returns the request thread.
def window(iterable, size=2):
    """Yield sliding windows of the given size over `iterable`.

    Each window is a tuple of `size` consecutive items; an iterable shorter
    than `size` yields nothing.
    """
    # BUG FIX: the source text was corrupted (the function body was
    # duplicated mid-token: "yield tuple(df window(..."); reconstructed.
    iterable = iter(iterable)
    # Prime the deque with the first size-1 items; maxlen drops the oldest
    # item automatically as each new one is appended.
    d = deque(islice(iterable, size - 1), maxlen=size)
    for _ in map(d.append, iterable):
        yield tuple(d)
yields windows of a given size
def payment_mode(self, payment_mode):
    """Sets the payment_mode of this CreditCardPayment.

    :param payment_mode: The payment_mode of this CreditCardPayment.
    :type: str
    :raises ValueError: when the value is neither None nor an allowed mode.
    """
    allowed_values = ["authorize", "capture"]
    is_acceptable = payment_mode is None or payment_mode in allowed_values
    if not is_acceptable:
        raise ValueError(
            "Invalid value for `payment_mode` ({0}), must be one of {1}"
            .format(payment_mode, allowed_values)
        )
    self._payment_mode = payment_mode
Sets the payment_mode of this CreditCardPayment. :param payment_mode: The payment_mode of this CreditCardPayment. :type: str
def create_credit_card_payment(cls, credit_card_payment, **kwargs):
    """Create a new CreditCardPayment.

    Synchronous by default; pass async=True to get the request thread back.

    :param CreditCardPayment credit_card_payment: attributes to create (required)
    :return: CreditCardPayment, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._create_credit_card_payment_with_http_info(
        credit_card_payment, **kwargs)
Create CreditCardPayment Create a new CreditCardPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_credit_card_payment(credit_card_payment, async=True) >>> result = thread.get() :param async bool :param CreditCardPayment credit_card_payment: Attributes of creditCardPayment to create (required) :return: CreditCardPayment If the method is called asynchronously, returns the request thread.
def delete_credit_card_payment_by_id(cls, credit_card_payment_id, **kwargs):
    """Delete an instance of CreditCardPayment by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str credit_card_payment_id: ID to delete (required)
    :return: None, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._delete_credit_card_payment_by_id_with_http_info(
        credit_card_payment_id, **kwargs)
Delete CreditCardPayment Delete an instance of CreditCardPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_credit_card_payment_by_id(credit_card_payment_id, async=True) >>> result = thread.get() :param async bool :param str credit_card_payment_id: ID of creditCardPayment to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_credit_card_payment_by_id(cls, credit_card_payment_id, **kwargs):
    """Return a single CreditCardPayment by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str credit_card_payment_id: ID to return (required)
    :return: CreditCardPayment, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._get_credit_card_payment_by_id_with_http_info(
        credit_card_payment_id, **kwargs)
Find CreditCardPayment Return single instance of CreditCardPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_credit_card_payment_by_id(credit_card_payment_id, async=True) >>> result = thread.get() :param async bool :param str credit_card_payment_id: ID of creditCardPayment to return (required) :return: CreditCardPayment If the method is called asynchronously, returns the request thread.
def list_all_credit_card_payments(cls, **kwargs):
    """Return a page of CreditCardPayments.

    Synchronous by default; pass async=True to get the request thread back.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[CreditCardPayment], or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._list_all_credit_card_payments_with_http_info(**kwargs)
List CreditCardPayments Return a list of CreditCardPayments This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_credit_card_payments(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[CreditCardPayment] If the method is called asynchronously, returns the request thread.
def replace_credit_card_payment_by_id(cls, credit_card_payment_id, credit_card_payment, **kwargs):
    """Replace all attributes of a CreditCardPayment.

    Synchronous by default; pass async=True to get the request thread back.

    :param str credit_card_payment_id: ID to replace (required)
    :param CreditCardPayment credit_card_payment: replacement attributes (required)
    :return: CreditCardPayment, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._replace_credit_card_payment_by_id_with_http_info(
        credit_card_payment_id, credit_card_payment, **kwargs)
Replace CreditCardPayment Replace all attributes of CreditCardPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_credit_card_payment_by_id(credit_card_payment_id, credit_card_payment, async=True) >>> result = thread.get() :param async bool :param str credit_card_payment_id: ID of creditCardPayment to replace (required) :param CreditCardPayment credit_card_payment: Attributes of creditCardPayment to replace (required) :return: CreditCardPayment If the method is called asynchronously, returns the request thread.
def update_credit_card_payment_by_id(cls, credit_card_payment_id, credit_card_payment, **kwargs):
    """Update attributes of a CreditCardPayment.

    Synchronous by default; pass async=True to get the request thread back.

    :param str credit_card_payment_id: ID to update (required)
    :param CreditCardPayment credit_card_payment: attributes to update (required)
    :return: CreditCardPayment, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._update_credit_card_payment_by_id_with_http_info(
        credit_card_payment_id, credit_card_payment, **kwargs)
Update CreditCardPayment Update attributes of CreditCardPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_credit_card_payment_by_id(credit_card_payment_id, credit_card_payment, async=True) >>> result = thread.get() :param async bool :param str credit_card_payment_id: ID of creditCardPayment to update. (required) :param CreditCardPayment credit_card_payment: Attributes of creditCardPayment to update. (required) :return: CreditCardPayment If the method is called asynchronously, returns the request thread.
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
    """Matching algorithm based on normalised cross correlation.

    Normalising the correlation prevents false positives from occurring on
    bright patches in the image.
    """
    im_h, im_w = image.shape
    temp_h, temp_w = template.shape
    # FFT-based convolution enables fast matching of large images; flipping
    # the template on both axes turns the convolution into a correlation.
    correlation = fftconvolve(image, template[::-1, ::-1])
    # fftconvolve returns an image of width (temp_w-1) + im_w + (temp_w-1),
    # likewise for height — trim back to the valid region.
    correlation = correlation[temp_h - 1:im_h, temp_w - 1:im_w]
    # Image regions which are potentially matches.
    match_position_dict = get_tiles_at_potential_match_regions(
        image, template, correlation, raw_tolerance=raw_tolerance)
    # Normalisation here eliminates false positives from bright spots.
    return normalise_correlation(match_position_dict, correlation, template,
                                 normed_tolerance=normed_tolerance)
Matching algorithm based on normalised cross correlation. Using this matching prevents false positives occurring for bright patches in the image
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
    """Matching via cross correlation refined by squared differences.

    The squared-difference step prevents false positives from occurring on
    bright patches in the image.
    """
    im_h, im_w = image.shape
    temp_h, temp_w = template.shape
    # FFT-based convolution enables fast matching of large images.
    correlation = fftconvolve(image, template[::-1, ::-1])
    # fftconvolve pads the result to (temp_w-1) + im_w + (temp_w-1) wide
    # (and likewise tall) — trim back to the valid region.
    correlation = correlation[temp_h - 1:im_h, temp_w - 1:im_w]
    # Image regions which are potentially matches.
    match_position_dict = get_tiles_at_potential_match_regions(
        image, template, correlation, raw_tolerance=raw_tolerance)
    # Squared differences eliminate false positives from bright spots.
    return calculate_squared_differences(
        match_position_dict, correlation, template,
        sq_diff_tolerance=sq_diff_tolerance)
Matching algorithm based on normalised cross correlation. Using this matching prevents false positives occurring for bright patches in the image
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
    """Matching via a 2-D Pearson product-moment correlation coefficient.

    More robust when the match may be scaled or slightly rotated; from
    experimentation, less prone to false positives than plain correlation.
    """
    im_h, im_w = image.shape
    temp_h, temp_w = template.shape
    # Subtract the template mean before convolving (Pearson numerator).
    demeaned_template = template - np.mean(template)
    convolution = fftconvolve(image, demeaned_template[::-1, ::-1])
    convolution = convolution[temp_h - 1:im_h, temp_w - 1:im_w]
    # NOTE(review): the original author suspected the tolerance condition in
    # this helper can yield an empty dict — verify against the helper.
    match_position_dict = get_tiles_at_potential_match_regions(
        image, template, convolution, method='correlation coefficient',
        raw_tolerance=raw_tolerance)
    return normalise_correlation_coefficient(
        match_position_dict, convolution, template,
        normed_tolerance=normed_tolerance)
Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient. This is more robust in the case where the match might be scaled or slightly rotated. From experimentation, this method is less prone to false positives than the correlation method.
def match_positions(shape, list_of_coords):
    """Collapse clusters of match coordinates into one position per match.

    Where multiple matches are each highlighted by a region of coordinates,
    label the connected regions and return the mean (x, y) of each region as
    the match position. Returns [] when there are no matches.
    """
    match_array = np.zeros(shape)
    try:
        # IndexError is hit on this line when list_of_coords is empty,
        # i.e. there are no matches.
        match_array[list_of_coords[:, 0], list_of_coords[:, 1]] = 1
        labelled = label(match_array)
        objects = find_objects(labelled[0])
        coords = [{'x': (slice_x.start, slice_x.stop),
                   'y': (slice_y.start, slice_y.stop)}
                  for (slice_y, slice_x) in objects]
        final_positions = [(int(np.mean(coords[i]['x'])),
                            int(np.mean(coords[i]['y'])))
                           for i in range(len(coords))]
        return final_positions
    except IndexError:
        # BUG FIX: `print 'no matches found'` was Python 2 statement syntax
        # — a SyntaxError under Python 3.
        print('no matches found')
        return []
In cases where we have multiple matches, each highlighted by a region of coordinates, we need to separate matches, and find mean of each to return as match position
def md5_8_name(self, url):
    """Rename a downloaded file to the first 8 hex chars of the URL's MD5,
    keeping the URL's original file extension."""
    digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    extension = os.path.splitext(url)[1]
    return digest[:8] + extension
把下载的文件重命名为地址的md5前8位
def delete_transaction_by_id(cls, transaction_id, **kwargs):
    """Delete an instance of Transaction by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str transaction_id: ID of transaction to delete (required)
    :return: None, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._delete_transaction_by_id_with_http_info(transaction_id, **kwargs)
Delete Transaction Delete an instance of Transaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_transaction_by_id(transaction_id, async=True) >>> result = thread.get() :param async bool :param str transaction_id: ID of transaction to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_transaction_by_id(cls, transaction_id, **kwargs):
    """Return a single Transaction by its ID.

    Synchronous by default; pass async=True to get the request thread back.

    :param str transaction_id: ID of transaction to return (required)
    :return: Transaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._get_transaction_by_id_with_http_info(transaction_id, **kwargs)
Find Transaction Return single instance of Transaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_transaction_by_id(transaction_id, async=True) >>> result = thread.get() :param async bool :param str transaction_id: ID of transaction to return (required) :return: Transaction If the method is called asynchronously, returns the request thread.
def list_all_transactions(cls, **kwargs):
    """Return a page of Transactions.

    Synchronous by default; pass async=True to get the request thread back.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Transaction], or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original issued the identical call.
    return cls._list_all_transactions_with_http_info(**kwargs)
List Transactions Return a list of Transactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Transaction] If the method is called asynchronously, returns the request thread.
def is_empty(self):
    '''Return `True` if form is valid and contains an empty lookup.'''
    # BUG FIX: the source text was corrupted (the function body was
    # duplicated mid-token: "extra_conditionsf is_empty("); reconstructed.
    return (self.is_valid() and not self.simple_lookups
            and not self.complex_conditions and not self.extra_conditions)
Return `True` if form is valid and contains an empty lookup.
def load_inventory(hosts_file=HOSTS_FILE):
    '''Loads Ansible inventory from file.

    Parameters
    ----------
    hosts_file: str, optional
        path to Ansible hosts file

    Returns
    -------
    ConfigParser.SafeConfigParser
        content of `hosts_file`
    '''
    # BUG FIX: the source text was corrupted (the function body was
    # duplicated mid-token: "return inventorf load_inventory("); reconstructed.
    inventory = SafeConfigParser(allow_no_value=True)
    if os.path.exists(hosts_file):
        inventory.read(hosts_file)
    else:
        # A missing inventory file is not fatal: warn and return the
        # empty parser.
        logger.warn('inventory file doesn\'t exist: %s', hosts_file)
    return inventory
Loads Ansible inventory from file. Parameters ---------- hosts_file: str, optional path to Ansible hosts file Returns ------- ConfigParser.SafeConfigParser content of `hosts_file`
def save_inventory(inventory, hosts_file=HOSTS_FILE):
    '''Saves Ansible inventory to file.

    Parameters
    ----------
    inventory: ConfigParser.SafeConfigParser
        content of the `hosts_file`
    hosts_file: str, optional
        path to Ansible hosts file
    '''
    # BUG FIX: the source text was corrupted (the function body was
    # duplicated mid-token: "inventory.write(ff save_inventory("); reconstructed.
    with open(hosts_file, 'w') as f:
        inventory.write(f)
Saves Ansible inventory to file. Parameters ---------- inventory: ConfigParser.SafeConfigParser content of the `hosts_file` hosts_file: str, optional path to Ansible hosts file
def called_with(self, *args, **kwargs):
    """Return True if the spy was called with the specified args/kwargs;
    otherwise raise VerificationError."""
    expected_call = Call(*args, **kwargs)
    if expected_call not in calls(self.spy):
        raise VerificationError(
            "expected %s to be called with %s, but it wasn't" % (
                self.spy, expected_call.formatted_args))
    return True
Return True if the spy was called with the specified args/kwargs. Otherwise raise VerificationError.
def not_called_with(self, *args, **kwargs):
    """Return True if the spy was NOT called with the specified args/kwargs;
    otherwise raise VerificationError."""
    unexpected_call = Call(*args, **kwargs)
    if unexpected_call in calls(self.spy):
        raise VerificationError(
            'expected %s to not be called with %s, but it was' % (
                self.spy, unexpected_call.formatted_args))
    return True
Return True if spy was not called with the specified args/kwargs. Otherwise raise VerificationError.
def _init_config(self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None): self._spi = spi self._spi_mosi = spiMosi self._spi_dc = spiDC self._spi_cs = spiCS self._spi_reset = spiReset self._spi_clk = spiClk self.width = width self.height = height
! SPI hardware and display width, height initialization.
def _init_io(self):
    """! GPIO initialization: select BCM pin numbering and configure the
    data/command pin as an output."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    for pin in (self._spi_dc,):
        GPIO.setup(pin, GPIO.OUT)
! GPIO initialization. Set GPIO into BCM mode and init other IOs mode
def clear(self, fill=0x00):
    """! Reset the frame buffer, filling every byte with `fill` (default 0)."""
    pixel_count = self.width * self.height
    self._buffer = [fill] * pixel_count
! Clear buffer data and other data RPiDiaplay object just implemented clear buffer data
def import_users(
    path,
    resource_name=None,
    send_email_to_user=None,
    alternate_email=None,
    verbose=None,
    export_to_file=None,
    **kwargs,
):
    """Import users from a CSV file with columns:

        username, first_name, last_name, email,
        sites: a comma-separated list of sites,
        groups: a comma-separated list of groups

    When `export_to_file` is truthy, writes the created credentials to
    `<path>new.csv`.
    """
    users = []
    with open(path) as f:
        reader = csv.DictReader(f)
        for user_data in reader:
            username = user_data.get("username")
            # Sites/groups are stored lower-cased, split on commas.
            site_names = user_data.get("sites").lower().split(",")
            group_names = user_data.get("groups").lower().split(",")
            first_name = user_data.get("first_name")
            last_name = user_data.get("last_name")
            email = user_data.get("email")
            # UserImporter performs the actual account creation.
            o = UserImporter(
                username=username,
                first_name=first_name,
                last_name=last_name,
                email=email,
                site_names=site_names,
                group_names=group_names,
                resource_name=resource_name,
                send_email_to_user=send_email_to_user,
                alternate_email=alternate_email,
                verbose=verbose,
                **kwargs,
            )
            # Collect the generated credentials for the optional export.
            users.append(
                {
                    "username": o.user.username,
                    "password": o.password,
                    "first_name": o.user.first_name,
                    "last_name": o.user.last_name,
                    "sites": o.site_names,
                    "groups": o.group_names,
                }
            )
    if export_to_file:
        fieldnames = [
            "username",
            "password",
            "first_name",
            "last_name",
            "sites",
            "groups",
        ]
        with open(path + "new.csv", "w+") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for user in users:
                writer.writerow(user)
Import users from a CSV file with columns: username first_name last_name email sites: a comma-separated list of sites groups: a comma-separated list of groups job_title
def connect(self):
    """This method connects to RabbitMQ using a SelectConnection object,
    returning the connection handle. When the connection is established,
    the on_connection_open method will be invoked by pika.

    Rotates through the configured rabbit URLs and retries forever on
    connection errors, sleeping longer after each failed attempt.

    :rtype: pika.SelectConnection
    """
    count = 1
    no_of_servers = len(self._rabbit_urls)
    while True:
        # Rotate through the configured servers as the attempt count grows.
        server_choice = (count % no_of_servers) - 1
        self._url = self._rabbit_urls[server_choice]
        try:
            logger.info('Connecting', attempt=count)
            return pika.SelectConnection(pika.URLParameters(self._url),
                                         self.on_connection_open,
                                         stop_ioloop_on_close=False)
        except pika.exceptions.AMQPConnectionError:
            logger.exception("Connection error")
            count += 1
            # Back off linearly with the number of failed attempts.
            logger.error("Connection sleep", no_of_seconds=count)
            time.sleep(count)
            continue
This method connects to RabbitMQ using a SelectConnection object, returning the connection handle. When the connection is established, the on_connection_open method will be invoked by pika. :rtype: pika.SelectConnection
def on_channel_open(self, channel):
    """Invoked by pika when the channel has been opened; stores the channel
    and declares the exchange to use.

    :param pika.channel.Channel channel: The channel object
    """
    logger.info('Channel opened', channel=channel)
    self._channel = channel
    self.add_on_channel_close_callback()
    self.setup_exchange(self._exchange)
This method is invoked by pika when the channel has been opened. The channel object is passed in so we can make use of it. Since the channel is now open, we'll declare the exchange to use. :param pika.channel.Channel channel: The channel object
def setup_exchange(self, exchange_name):
    """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
    command; on completion pika invokes on_exchange_declareok.

    :param str|unicode exchange_name: The name of the exchange to declare
    """
    logger.info('Declaring exchange', name=exchange_name)
    self._channel.exchange_declare(self.on_exchange_declareok,
                                   exchange_name,
                                   self._exchange_type)
Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare
def setup_queue(self, queue_name):
    """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
    command; on completion pika invokes on_queue_declareok.

    :param str|unicode queue_name: The name of the queue to declare.
    """
    logger.info('Declaring queue', name=queue_name)
    self._channel.queue_declare(
        self.on_queue_declareok,
        queue_name,
        durable=self._durable_queue
    )
Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare.
def on_queue_declareok(self, method_frame):
    """Invoked by pika when the Queue.Declare RPC made in setup_queue has
    completed; binds the queue and exchange together (on_bindok is invoked
    when the bind completes).

    :param pika.frame.Method method_frame: The Queue.DeclareOk frame
    """
    logger.info('Binding to rabbit',
                exchange=self._exchange,
                queue=self._queue)
    self._channel.queue_bind(self.on_bindok, self._queue, self._exchange)
Method invoked by pika when the Queue.Declare RPC call made in setup_queue has completed. In this method we will bind the queue and exchange together with the routing key by issuing the Queue.Bind RPC command. When this command is complete, the on_bindok method will be invoked by pika. :param pika.frame.Method method_frame: The Queue.DeclareOk frame
def on_consumer_cancelled(self, method_frame):
    """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
    receiving messages; shuts down the channel.

    :param pika.frame.Method method_frame: The Basic.Cancel frame
    """
    msg = 'Consumer was cancelled remotely, shutting down: {0!r}'
    logger.info(msg.format(method_frame))
    if self._channel:
        self._channel.close()
Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer receiving messages. :param pika.frame.Method method_frame: The Basic.Cancel frame
def acknowledge_message(self, delivery_tag, **kwargs):
    """Acknowledge the message delivery by sending a Basic.Ack RPC method
    for the delivery tag.

    :param int delivery_tag: The delivery tag from the Basic.Deliver frame
    """
    logger.info('Acknowledging message', delivery_tag=delivery_tag, **kwargs)
    self._channel.basic_ack(delivery_tag)
Acknowledge the message delivery from RabbitMQ by sending a Basic.Ack RPC method for the delivery tag. :param int delivery_tag: The delivery tag from the Basic.Deliver frame
def nack_message(self, delivery_tag, **kwargs):
    """Negative acknowledge a message.

    :param int delivery_tag: The delivery tag from the Basic.Deliver frame
    """
    logger.info('Nacking message', delivery_tag=delivery_tag, **kwargs)
    self._channel.basic_nack(delivery_tag)
Negative acknowledge a message :param int delivery_tag: The deliver tag from the Basic.Deliver frame
def reject_message(self, delivery_tag, requeue=False, **kwargs):
    """Reject the message delivery by sending a Basic.Reject RPC method for
    the delivery tag.

    :param int delivery_tag: The delivery tag from the Basic.Deliver frame
    :param bool requeue: whether RabbitMQ should requeue the message
    """
    logger.info('Rejecting message', delivery_tag=delivery_tag, **kwargs)
    self._channel.basic_reject(delivery_tag, requeue=requeue)
Reject the message delivery from RabbitMQ by sending a Basic.Reject RPC method for the delivery tag. :param int delivery_tag: The delivery tag from the Basic.Deliver frame
def on_message(self, unused_channel, basic_deliver, properties, body):
    """Invoked by pika when a message is delivered from RabbitMQ; logs the
    delivery and immediately acknowledges it.

    :param pika.channel.Channel unused_channel: The channel object
    :param pika.Spec.Basic.Deliver basic_deliver: carries the exchange,
        routing key, delivery tag and redelivered flag
    :param pika.Spec.BasicProperties properties: message properties
    :param str|unicode body: The message body
    """
    logger.info(
        'Received message',
        delivery_tag=basic_deliver.delivery_tag,
        app_id=properties.app_id,
        msg=body,
    )
    self.acknowledge_message(basic_deliver.delivery_tag)
Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel unused_channel: The channel object :param pika.Spec.Basic.Deliver: basic_deliver method :param pika.Spec.BasicProperties: properties :param str|unicode body: The message body
def stop_consuming(self):
    """Ask RabbitMQ to stop delivering messages via a Basic.Cancel RPC."""
    if not self._channel:
        return
    logger.info('Sending a Basic.Cancel RPC command to RabbitMQ')
    self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
Tell RabbitMQ that you would like to stop consuming by sending the Basic.Cancel RPC command.
def open_channel(self):
    """Issue Channel.Open; pika calls on_channel_open when it is ready."""
    logger.info('Creating a new channel')
    connection = self._connection
    connection.channel(on_open_callback=self.on_channel_open)
Open a new channel with RabbitMQ by issuing the Channel.Open RPC command. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika.
def run(self):
    """Connect to RabbitMQ and start the blocking IOLoop."""
    logger.debug('Running rabbit consumer')
    connection = self.connect()
    self._connection = connection
    connection.ioloop.start()
Run the example consumer by connecting to RabbitMQ and then starting the IOLoop to block and allow the SelectConnection to operate.
def stop(self):
    """Cleanly shut down by cancelling the consumer with RabbitMQ.

    When RabbitMQ confirms the cancellation, on_cancelok closes the
    channel and connection. NOTE(review): typically invoked on
    KeyboardInterrupt; the IOLoop must still be running for the cancel
    RPC to actually reach RabbitMQ.
    """
    logger.info('Stopping')
    # Flag shutdown first — presumably read by close/reconnect callbacks
    # elsewhere in the class; confirm against those handlers.
    self._closing = True
    self.stop_consuming()
    logger.info('Stopped')
Cleanly shutdown the connection to RabbitMQ by stopping the consumer with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok will be invoked by pika, which will then close the channel and connection. The IOLoop is started again because this method is invoked when CTRL-C is pressed raising a KeyboardInterrupt exception. This exception stops the IOLoop which needs to be running for pika to communicate with RabbitMQ. All of the commands issued prior to starting the IOLoop will be buffered but not processed.
def tx_id(properties):
    """Return the tx_id carried in a rabbit message's headers.

    :param properties: message properties (must have a ``tx_id`` header)
    :returns: tx_id of the survey response
    :rtype: str
    :raises KeyError: if ``tx_id`` is missing from the headers
    """
    value = properties.headers['tx_id']
    logger.info("Retrieved tx_id from message properties: tx_id={}".format(value))
    return value
Gets the tx_id for a message from a rabbit queue, using the message properties. Will raise KeyError if tx_id is missing from message headers. :param properties: Message properties :returns: tx_id of survey response :rtype: str
def authenticate(self, username, password):
    """Authenticate against the ObjectRocket API.

    :param str username: The username to perform basic authentication with.
    :param str password: The password to perform basic authentication with.
    :returns: A token used for authentication against token protected resources.
    :rtype: str
    :raises errors.AuthFailure: on non-200 responses or unexpected errors.
    """
    # Update the username and password bound to this instance for re-authentication needs.
    self._username = username
    self._password = password

    # Attempt to authenticate.
    resp = requests.get(
        self._url,
        auth=(username, password),
        **self._default_request_kwargs
    )

    # Attempt to extract authentication data.
    try:
        if resp.status_code == 200:
            json_data = resp.json()
            token = json_data['data']['token']
        elif resp.status_code == 401:
            raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
        else:
            raise errors.AuthFailure(
                "Unknown exception while authenticating: '{}'".format(resp.text)
            )
    except errors.AuthFailure:
        # Already a well-formed failure; re-raise untouched.
        raise
    except Exception as ex:
        # Anything else (bad JSON, missing keys, ...) is logged and wrapped.
        logging.exception(ex)
        raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))

    # Update the token bound to this instance for use by other client operations layers.
    self._token = token
    logger.info('New API token received: "{}".'.format(token))
    return token
Authenticate against the ObjectRocket API. :param str username: The username to perform basic authentication against the API with. :param str password: The password to perform basic authentication against the API with. :returns: A token used for authentication against token protected resources. :rtype: str
def _refresh(self):
    """Re-authenticate with the bound credentials and return the new token.

    Convenience hook invoked automatically when authentication fails
    during normal client use.
    """
    token = self.authenticate(self._username, self._password)
    self._token = token
    logger.info('New API token received: "{}".'.format(token))
    return self._token
Refresh the API token using the currently bound credentials. This is simply a convenience method to be invoked automatically if authentication fails during normal client use.
def _verify(self, token):
    """Check a token against the API's verify endpoint.

    :param str token: The API token to verify.
    :returns: The token's corresponding user model as a dict, or None if invalid.
    :rtype: dict
    """
    verify_url = '{}{}/'.format(self._url, 'verify')
    response = requests.post(
        verify_url,
        json={'token': token},
        **self._default_request_kwargs
    )
    if response.status_code != 200:
        return None
    return response.json().get('data', None)
Verify that the given token is valid. :param str token: The API token to verify. :returns: The token's corresponding user model as a dict, or None if invalid. :rtype: dict
def preprocess(net, image):
    """Convert an (H, W, C) image to Caffe input layout.

    Moves channels to the first axis, reverses channel order
    (RGB -> BGR), and subtracts the network's mean "data" blob.

    :param net: Caffe net whose ``transformer.mean["data"]`` is subtracted.
    :param image: (H, W, C) array-like image.
    :return: float32 array of shape (C, H, W).
    """
    # Original line was garbled by a duplicated paste; this is the single,
    # well-formed statement.
    return np.float32(np.rollaxis(image, 2)[::-1]) - net.transformer.mean["data"]
convert to Caffe input image layout
def bx_encode(n, alphabet):
    """Encode integer *n* in base ``len(alphabet)`` using its digits.

    :param n: a non-negative integer.
    :param alphabet: a 0-based iterable of digit characters.
    :raises TypeError: if *n* is not an int.
    """
    if not isinstance(n, int):
        raise TypeError('an integer is required')

    base = len(alphabet)
    if n == 0:
        return alphabet[0]

    digits = []
    while n:
        n, remainder = divmod(n, base)
        digits.append(alphabet[remainder])
    return ''.join(reversed(digits))
\ Encodes an integer :attr:`n` in base ``len(alphabet)`` with digits in :attr:`alphabet`. :: # 'ba' bx_encode(3, 'abc') :param n: a positive integer. :param alphabet: a 0-based iterable.
def bx_decode(string, alphabet, mapping=None):
    """Decode *string* of digits from *alphabet* into an integer.

    :param string: a non-empty string of digits from *alphabet*.
    :param alphabet: a 0-based iterable of digit characters.
    :param mapping: optional Mapping of digit -> positional value; the
        inverse of *alphabet* is used when falsy.
    :raises ValueError: on an empty string or an unknown digit.
    :raises TypeError: if *mapping* is not a Mapping.
    """
    mapping = mapping or {digit: value for value, digit in enumerate(alphabet)}
    base = len(alphabet)

    if not string:
        raise ValueError('string cannot be empty')
    if not isinstance(mapping, Mapping):
        raise TypeError('a Mapping is required')

    total = 0
    for digit in string:
        if digit not in mapping:
            raise ValueError(
                "invalid literal for bx_decode with base %i: '%s'" % (base, digit))
        total = total * base + mapping[digit]
    return total
\ Transforms a string in :attr:`alphabet` to an integer. If :attr:`mapping` is provided, each key must map to its positional value without duplicates. :: mapping = {'a': 0, 'b': 1, 'c': 2} # 3 bx_decode('ba', 'abc', mapping) :param string: a string consisting of key from `alphabet`. :param alphabet: a 0-based iterable. :param mapping: a :class:`Mapping <collection.Mapping>`. If `None`, the inverse of `alphabet` is used, with values mapped to indices.
def _do_analysis(options):
    """Run a single analysis described by *options* and save its results.

    Parameters
    ----------
    options : dict
        Option names and values for analysis
    """
    module = _function_location(options)
    core_results = _call_analysis_function(options, module)

    # Model fitting only applies to empirical analyses with models listed.
    wants_fits = (module == 'emp') and ('models' in options)
    fit_results = _fit_models(options, core_results) if wants_fits else None

    _save_results(options, module, core_results, fit_results)
Do analysis for a single run, as specified by options. Parameters ---------- options : dict Option names and values for analysis
def _call_analysis_function(options, module):
    """Call function from module and get result, using inputs from options.

    Parameters
    ----------
    options : dict
        Option names and values for analysis
    module : str
        Short name of module within macroeco containing analysis function

    Returns
    -------
    dataframe, array, value, list of tuples
        emp-module functions return a list of (description, dataframe)
        tuples; other modules return a dataframe, array, or value.
    """
    args, kwargs = _get_args_kwargs(options, module)
    # NOTE: the eval relies on the local names `args` and `kwargs` being in
    # scope. `module` and options['analysis'] come from run parameters, not
    # end-user input, but this is still eval on config text — keep trusted.
    return eval("%s.%s(*args, **kwargs)" % (module, options['analysis']))
Call function from module and get result, using inputs from options Parameters ---------- options : dict Option names and values for analysis module : str Short name of module within macroeco containing analysis function Returns ------- dataframe, array, value, list of tuples Functions from emp module return a list of tuples in which first element of the tuple gives a string describing the result and the second element giving the result of the analysis as a dataframe. Functions in other modules return dataframe, array, or value.
def _get_args_kwargs(options, module):
    """Given options (including analysis) and module, extract args and kwargs.

    Option values are eval'd so config text like "2" or "(1, 2)" becomes a
    Python object; values that fail to parse are kept as plain strings.
    """
    if module == 'emp':
        options = _emp_extra_options(options)
    arg_names, kw_names = _arg_kwarg_lists(module, options['analysis'])

    # Create list of values for arg_names
    args = []
    for arg_name in arg_names:

        if arg_name == 'patch':  # For patch arg, append actual patch obj
            args.append(options['patch'])
            continue
        if arg_name == 'self':  # Ignore self from class methods
            continue
        if arg_name == 'k':  # scipy dists use k and x, we always use x
            arg_name = 'x'

        try:
            # Direct eval replaces the old py2 `exec 'args.append(eval("%s"))'`
            # statement: same semantics, valid on py3, and no longer breaks
            # when the option text contains a double quote.
            args.append(eval(options[arg_name]))
        except SyntaxError:  # eval failing because option is a string
            args.append(options[arg_name])
        except Exception:
            raise ValueError("Value for required argument %s not provided"
                             % arg_name)

    # Create dict with vals for kw_names
    kwargs = {}
    for kw_name in kw_names:
        if kw_name in options.keys():  # If a value is given for this kwarg
            try:
                kwargs[kw_name] = eval(options[kw_name])
            except SyntaxError:  # eval failing because value is a string
                kwargs[kw_name] = options[kw_name]
            except Exception:
                raise ValueError("Value for optional argument %s is invalid"
                                 % kw_name)

    return args, kwargs
Given an options (including analysis), and module, extract args and kwargs
def _emp_extra_options(options):
    """Get special options patch, cols, and splits if analysis in emp module.

    Validates the metadata path, builds an emp.Patch, and fills missing
    'cols'/'splits' options with empty strings. Returns the updated dict.
    """
    # Check that metadata is valid
    metadata_path = os.path.normpath(os.path.join(options['param_dir'],
                                                  options['metadata']))
    if not os.path.isfile(metadata_path):
        # Fixed py2-only `raise IOError, (...)` statement syntax.
        raise IOError("Path to metadata file %s is invalid." % metadata_path)
    options['metadata_path'] = metadata_path

    # Using subset if given, create and store patch
    subset = options.get('subset', '')
    options['patch'] = emp.Patch(metadata_path, subset)

    # If cols or splits not given in options, make empty strings
    for key in ('cols', 'splits'):
        if key not in options:
            options[key] = ''

    return options
Get special options patch, cols, and splits if analysis in emp module
def _fit_models(options, core_results):
    """Fit models to empirical results from a function in the emp module.

    Parameters
    ----------
    options : dict
        Option names and values for analysis
    core_results : list of tuples
        Output of function in emp

    Returns
    -------
    list of dicts
        One dict per subset, keyed by model name; each value is
        [fits, values, stat_names, stats].
    """
    logging.info("Fitting models")
    model_names = options['models'].replace(' ', '').split(';')

    # TODO: Make work for 2D results, i.e., curves, comm_sep, o_ring
    # TODO: Make work for curves in general (check if 'x' present in core_res)

    fit_results = []
    for core_result in core_results:  # Each subset
        subset_fits = {}
        for name in model_names:
            fits = _get_fits(core_result, name, options)
            values = _get_values(core_result, name, fits)
            stat_names, stats = _get_comparison_stat(core_result, values,
                                                     name, fits)
            subset_fits[name] = [fits, values, stat_names, stats]
        fit_results.append(subset_fits)
    return fit_results
Fit models to empirical result from a function in emp module Parameters ---------- options : dict Option names and values for analysis core_results : list of tuples Output of function in emp Returns ------- list of dicts Each element in list corresponds to a subset. The dict has a key for each model given in options, and the value is a list of fitted parameters (tuple), values (array), comparison statistic names (list), and comparison statistic values (list). Notes ----- To determine if the empirical result refers to a curve or a distribution, the result dataframe is inspected for a column 'x', which indicates a curve.
def _save_results(options, module, core_results, fit_results):
    """Save results of analysis as tables and figures.

    Parameters
    ----------
    options : dict
        Option names and values for analysis
    module : str
        Module that contained function used to generate core_results
    core_results : dataframe, array, value, list of tuples
        Results of main analysis
    fit_results : list or None
        Results of comparing emp analysis to models, None if not applicable
    """
    logging.info("Saving all results")

    mpl.rcParams.update(misc.rcparams.ggplot_rc)  # Custom plot format
    os.makedirs(options['run_dir'])  # Make run directory

    # Core results always written; emp analyses also get a subset index.
    _write_core_tables(options, module, core_results)
    if module == 'emp':
        _write_subset_index_file(options, core_results)

    # Write model/data comparison if models were given
    if fit_results:
        model_names = options['models'].replace(' ', '').split(';')
        for i, _ in enumerate(core_results):
            _write_fitted_params(i, model_names, options, fit_results)
            _write_test_statistics(i, model_names, options, fit_results)
            _write_comparison_plot_table(i, model_names, options,
                                         core_results, fit_results)
Save results of analysis as tables and figures Parameters ---------- options : dict Option names and values for analysis module : str Module that contained function used to generate core_results core_results : dataframe, array, value, list of tuples Results of main analysis fit_results : list or None Results of comparing emp analysis to models, None if not applicable
def _write_core_tables(options, module, core_results): table_name = 'core_result.csv' single_file_path = os.path.join(options['run_dir'], table_name) if module == 'emp': # List of tuples for i, core_result in enumerate(core_results): file_path = _get_file_path(i, options, table_name) core_result[1].to_csv(file_path, index=False, float_format='%.4f') elif type(core_results) == type(pd.DataFrame()): # DataFrame core_results.to_csv(single_file_path, index=False, float_format='%.4f') else: # Array or single value (atleast_1d corrects for unsized array) df = pd.DataFrame({'y': np.atleast_1d(core_results)}) df.to_csv(single_file_path, index=False, float_format='%.4f')
Notes ----- Depending on function that was called for analysis, core_results may be a list of tuples (empirical), a dataframe, an array, or a single value. For the list of tuples from empirical, the second element of each tuple is the raw result, and we write them all with the appropriate prefix. For dataframes, we write them. For arrays or single values, we convert to data frames and write them.
def _write_subset_index_file(options, core_results): f_path = os.path.join(options['run_dir'], '_subset_index.csv') subset_strs = zip(*core_results)[0] index = np.arange(len(subset_strs)) + 1 df = pd.DataFrame({'subsets': subset_strs}, index=index) df.to_csv(f_path)
Write table giving index of subsets, giving number and subset string
def _pad_plot_frame(ax, pad=0.01): xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() xr = xmax - xmin yr = ymax - ymin ax.set_xlim(xmin - xr*pad, xmax + xr*pad) ax.set_ylim(ymin - yr*pad, ymax + yr*pad) return ax
Provides padding on sides of frame equal to pad fraction of plot
def _output_cdf_plot(core_result, spid, models, options, fit_results):
    """Function for plotting cdf of empirical data against fitted models."""

    # CDF of the raw 'y' values
    x = core_result['y'].values
    df = emp.empirical_cdf(x)
    df.columns = ['x', 'empirical']

    def calc_func(model, df, shapes):
        # Evaluate the fitted model's CDF at the empirical x values;
        # `model` names a distribution inside the `mod` module.
        return eval("mod.%s.cdf(df['x'], *shapes)" % model)

    # Executed later by _save_table_and_plot with `ax`, `df`, `emp` in scope.
    plot_exec_str = "ax.step(df['x'], emp, color='k', lw=3);ax.set_ylim(top=1)"

    _save_table_and_plot(spid, models, options, fit_results, 'data_pred_cdf',
                         df, calc_func, plot_exec_str)
Function for plotting cdf
def output_pdf_plot(core_result, spid, models, options, fit_results):
    """Function for plotting pdf/pmf of empirical data against fitted models."""

    # PDF/PMF as a normalized histogram of the 'y' values.
    hist_bins = 11
    # `density=True` replaces the long-deprecated `normed=True`, which was
    # removed in NumPy 1.24; both normalize the histogram to a density.
    emp_hist, edges = np.histogram(core_result['y'].values, hist_bins,
                                   density=True)
    x = (np.array(edges[:-1]) + np.array(edges[1:])) / 2  # bin midpoints
    df = pd.DataFrame({'x': x, 'empirical': emp_hist})

    def calc_func(model, df, shapes):
        try:
            # Discrete models expose pmf; evaluate at integer support.
            return eval("mod.%s.pmf(np.floor(df['x']), *shapes)" % model)
        except Exception:
            # Continuous models expose pdf instead.
            return eval("mod.%s.pdf(df['x'], *shapes)" % model)

    # Executed later by _save_table_and_plot with `ax`, `df`, `emp` in scope.
    plot_exec_str = "ax.bar(df['x']-width/2, emp, width=width, color='gray')"

    _save_table_and_plot(spid, models, options, fit_results, 'data_pred_pdf',
                         df, calc_func, plot_exec_str)
Function for plotting pdf/pmf
def open(self):
    """! \~english Turn on the device with all sensors enabled
    (clears both power-management registers)
    \~chinese 开启全部传感器
    """
    for reg in (self.REG_PWR_MGMT_1, self.REG_PWR_MGMT_2):
        self._sendCmd(reg, 0x00)
! \~english Turn on the device with all sensors at the same time \~chinese 开启全部传感器
def openOnlyAccel(self, cycleFreq = 0x00 ):
    """! Turn on the device in Accelerometer-Only Low Power Mode.

    @param cycleFreq: wake-up frequency, one of
        VAL_PWR_MGMT_2_LP_WAKE_CTRL_1_25HZ (default) / _5HZ / _20HZ / _40HZ
    """
    self.openWith(accel=True, gyro=False, temp=False,
                  cycle=True, cycleFreq=cycleFreq)
! Turn on the device in Accelerometer-Only Low Power Mode @param cycleFreq can be chosen from: @see VAL_PWR_MGMT_2_LP_WAKE_CTRL_1_25HZ is default @see VAL_PWR_MGMT_2_LP_WAKE_CTRL_5HZ @see VAL_PWR_MGMT_2_LP_WAKE_CTRL_20HZ @see VAL_PWR_MGMT_2_LP_WAKE_CTRL_40HZ
def setDataRdyInt(self, int_cfg = 0x20 ):
    """! \~english Enable the Data Ready interrupt.
    @param int_cfg: value for register 55 (0x37) – INT Pin / Bypass Enable
        Configuration, page 26
    \~chinese 启用数据就绪中断
    """
    self._sendCmd( self.REG_INT_PIN_CFG, int_cfg )
    self._sendCmd( self.REG_INT_ENABLE, self.VAL_INT_ENABLE_DATA_RDY )
! \~english Set to enabled Data Ready Interrupt int_cfg : Register 55( 0x37 ) – INT Pin / Bypass Enable Configuration, page 26 \~chinese 启用数据就绪中断 @param int_cfg: 寄存器 55( 0x37 ) – INT Pin / Bypass Enable Configuration, page 26
def readAccelRange( self ):
    """! Read the configured accelerometer range.

    @return an int, one of ACCEL_RANGE_2G / _4G / _8G / _16G.
    """
    config = self._readByte(self.REG_ACCEL_CONFIG)
    # Clear every bit except the range field: (x | 0xE7) ^ 0xE7 == x & 0x18
    return (config | 0xE7) ^ 0xE7
! Reads the range of accelerometer setup. @return an int value. It should be one of the following values: @see ACCEL_RANGE_2G @see ACCEL_RANGE_4G @see ACCEL_RANGE_8G @see ACCEL_RANGE_16G
def getAccelData( self, raw = False ):
    """! Get the X, Y and Z values from the accelerometer.

    @param raw If True, return the range-scaled readings as-is (in g);
        if False, multiply by self._gravityFactor first — presumably
        converting g to m/s^2, confirm the factor's definition.
    @return dict like {"x": ..., "y": ..., "z": ...}, or False when the
        configured accel range is unrecognized.
    """
    # Raw 16-bit readings, high byte register first.
    x = self._readWord(self.REG_ACCEL_XOUT_H)
    y = self._readWord(self.REG_ACCEL_YOUT_H)
    z = self._readWord(self.REG_ACCEL_ZOUT_H)

    # Pick the divisor matching the configured full-scale range.
    accel_scale_modifier = None
    accel_range = self.readAccelRange()

    if accel_range == self.ACCEL_RANGE_2G:
        accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G
    elif accel_range == self.ACCEL_RANGE_4G:
        accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G
    elif accel_range == self.ACCEL_RANGE_8G:
        accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G
    elif accel_range == self.ACCEL_RANGE_16G:
        accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G
    else:
        print( "ERROR: Unkown accel range!" )
        return False
        #accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G

    x = x / accel_scale_modifier
    y = y / accel_scale_modifier
    z = z / accel_scale_modifier

    if raw == True:
        return { 'x': x, 'y': y, 'z': z }
    elif raw == False:
        return { 'x': x * self._gravityFactor, 'y': y * self._gravityFactor, 'z': z * self._gravityFactor }
! Gets and returns the X, Y and Z values from the accelerometer. @param raw If raw is True, it will return the data in g,<br> If raw is False, it will return the data in m/s^2 @return a dictionary with the measurement results or Boolean. @retval {...} data in g if raw is True. @retval {...} data in m/s^2 if raw is False. @retval False means 'Unkown accel range', that you need to check the "accel range" configuration @note Result data format: {"x":0.45634,"y":0.2124,"z":1.334}
def readGyroRange( self ):
    """! Read the configured gyroscope range.

    @return an int, one of GYRO_RANGE_250DEG / _500DEG / _1KDEG / _2KDEG.
    """
    config = self._readByte( self.REG_GYRO_CONFIG )
    # Clear every bit except the range field: (x | 0xE7) ^ 0xE7 == x & 0x18
    return (config | 0xE7) ^ 0xE7
! Read range of gyroscope. @return an int value. It should be one of the following values (GYRO_RANGE_250DEG) @see GYRO_RANGE_250DEG @see GYRO_RANGE_500DEG @see GYRO_RANGE_1KDEG @see GYRO_RANGE_2KDEG
def getGyroData(self):
    """! Get the X, Y and Z values from the gyroscope.

    @return dict like {"x": ..., "y": ..., "z": ...} of range-scaled
        readings, or False when the configured gyro range is unrecognized.
    """
    # Raw 16-bit readings, high byte register first.
    x = self._readWord(self.REG_GYRO_XOUT_H)
    y = self._readWord(self.REG_GYRO_YOUT_H)
    z = self._readWord(self.REG_GYRO_ZOUT_H)

    # Pick the divisor matching the configured full-scale range.
    gyro_scale_modifier = None
    gyro_range = self.readGyroRange()

    if gyro_range == self.GYRO_RANGE_250DEG:
        gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG
    elif gyro_range == self.GYRO_RANGE_500DEG:
        gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG
    elif gyro_range == self.GYRO_RANGE_1KDEG:
        gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1KDEG
    elif gyro_range == self.GYRO_RANGE_2KDEG:
        gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2KDEG
    else:
        print("ERROR: Unkown gyroscope range!")
        return False
        #gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG

    x = x / gyro_scale_modifier
    y = y / gyro_scale_modifier
    z = z / gyro_scale_modifier

    return {'x': x, 'y': y, 'z': z}
! Gets and returns the X, Y and Z values from the gyroscope @return a dictionary with the measurement results or Boolean. @retval {...} a dictionary data. @retval False means 'Unkown gyroscope range', that you need to check the "gyroscope range" configuration @note Result data format: {"x":0.45634,"y":0.2124,"z":1.334}
def getAllData(self, temp = True, accel = True, gyro = True):
    """! Collect the enabled measurements into one dict.

    @param temp: include temperature under "temp"
    @param accel: include accelerometer data under "accel"
    @param gyro: include gyroscope data under "gyro"
    @return dict, e.g. {"temp": ..., "accel": {...}, "gyro": {...}};
        empty dict when every flag is False.
    """
    result = {}
    if temp:
        result["temp"] = self.getTemp()
    if accel:
        result["accel"] = self.getAccelData( raw = False )
    if gyro:
        result["gyro"] = self.getGyroData()
    return result
! Get all the available data. @param temp: True - Allow to return Temperature data @param accel: True - Allow to return Accelerometer data @param gyro: True - Allow to return Gyroscope data @return a dictionary data @retval {} Did not read any data @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
def repeater(pipe, how_many=2):
    """Repeat each value in the pipeline *how_many* times.

    :param pipe: source iterable.
    :param how_many: number of times to yield each item.
    """
    # Original line was garbled by a duplicated paste ("yield f repeater...");
    # this is the single, well-formed generator.
    r = range(how_many)
    for item in pipe:
        for _ in r:
            yield item
this function repeats each value in the pipeline however many times you need
def get_neighbor_expression_vector(neighbors, gene_expression_dict):
    """Collect expression values for neighboring genes.

    Genes missing from *gene_expression_dict* are silently skipped.

    Attribute:
        neighbors (list): Gene identifiers of neighboring genes.
        gene_expression_dict (dict): (Gene identifier)-(expression) map.
    """
    return [gene_expression_dict[gene]
            for gene in neighbors
            if gene in gene_expression_dict]
Get an expression vector of neighboring genes. Attribute: neighbors (list): List of gene identifiers of neighboring genes. gene_expression_dict (dict): (Gene identifier)-(gene expression) dictionary.
def exp2prob(expression_vector):
    """Normalize an expression vector into a probability vector.

    Returns an all-zero vector when the total expression is zero.

    Attribute:
        expression_vector (list): List of expression values.
    """
    vec = np.asarray(expression_vector)
    total = np.sum(vec)
    if total == 0:
        return np.zeros(len(expression_vector))
    return vec / total
Convert an expression vector into a probability vector. Attribute: expression_vector (list): List of expression values.
def kld(p1, p2):
    """Kullback-Leibler divergence KL(p1 || p2).

    Assumes p1 and p2 are already normalized to sum to 1; terms where
    p1 == 0 contribute zero.
    """
    contributions = np.where(p1 != 0, p1 * np.log(p1 / p2), 0)
    return np.sum(contributions)
Compute Kullback-Leibler divergence between p1 and p2. It assumes that p1 and p2 are already normalized that each of them sums to 1.
def jsd(p1, p2):
    """Jensen-Shannon divergence between p1 and p2.

    Assumes p1 and p2 are already normalized to sum to 1.
    """
    midpoint = (p1 + p2) / 2
    return (kld(p1, midpoint) + kld(p2, midpoint)) / 2
Compute Jensen-Shannon divergence between p1 and p2. It assumes that p1 and p2 are already normalized that each of them sums to 1.
def njsd(network, ref_gene_expression_dict, query_gene_expression_dict, gene_set):
    """Mean Jensen-Shannon divergence between query and reference expression
    profiles over the neighborhoods of genes in *gene_set*.

    :param network: graph whose nodes are gene identifiers.
    :param ref_gene_expression_dict: reference (gene)-(expression) map.
    :param query_gene_expression_dict: query (gene)-(expression) map.
    :param gene_set: genes whose neighborhoods are scored.
    """
    gene_jsd_dict = dict()
    reference_genes = ref_gene_expression_dict.keys()
    # BUG FIX: the original compared len() to the message string
    # (`len(...) != 'Reference ...'`), which is always true; the message
    # belongs after the comma as the assertion message.
    assert len(reference_genes) != 0, \
        'Reference gene expression profile should have > 0 genes.'

    for gene in gene_set:
        if gene not in network.nodes:
            continue

        neighbors = find_neighbors(network, gene)
        query_expression_vec = get_neighbor_expression_vector(
            neighbors, query_gene_expression_dict)
        ref_expression_vec = get_neighbor_expression_vector(
            neighbors, ref_gene_expression_dict)

        assert len(query_expression_vec) == len(ref_expression_vec), \
            'Topology of reference network and query network differs. Please check.'

        # A gene which has non-expressed neighbors is ignored.
        if np.sum(query_expression_vec) == 0 and np.sum(ref_expression_vec) == 0:
            continue

        query_p_vec = exp2prob(query_expression_vec)
        ref_p_vec = exp2prob(ref_expression_vec)
        gene_jsd_dict[gene] = jsd(query_p_vec, ref_p_vec)

    return np.mean(list(gene_jsd_dict.values()))
Calculate Jensen-Shannon divergence between query and reference gene expression profile.
def lookupProcessor(name):
    """Lookup processor class object by its name.

    :param name: registered processor name.
    :raises LookupError: if *name* is not a registered processor.
    """
    if name in _proc_lookup:
        return _proc_lookup[name]
    else:
        # NOTE(review): the backslash continuation embeds the next line's
        # leading whitespace inside the message string — likely unintended.
        error_string = 'If you are creating a new processor, please read the\
          documentation on creating a new processor'
        raise LookupError("Unknown processor %s\n%s" % (name, error_string))
Lookup processor class object by its name
def read_developer_settings():
    """Read developer settings from the local machine or the environment.

    Environment variables take precedence over the local config file.

    :return: dict|None
    """
    settings = read_cfg("/.rwmeta/developer_settings.json")

    raw = os.environ.get('META_SERVICE_ACCOUNT_SECRET', None)
    if not raw:
        raw = os.environ.get('X-META-Developer-Settings', None)
    if raw:
        settings = json.loads(raw)

    return settings
Читает конфигурации разработчика с локальной машины или из переменных окружения При этом переменная окружения приоритетнее :return: dict|None
def read_cfg(path) -> dict:
    """Load a JSON config file, or None when it does not exist.

    :param path: example: "/.rwmeta/developer_settings.json"
    :return: dict
    """
    full_path = __build_path(path)
    if not os.path.isfile(full_path):
        return None
    with open(full_path, 'r') as cfg_file:
        return json.loads(cfg_file.read())
:param path: example: "/.rwmeta/developer_settings.json" :return: dict
def write_cfg(path, value) -> None:
    """Serialize *value* as JSON into the config file at *path*.

    :param path: example: "/.rwmeta/developer_settings.json"
    :param value: dict
    """
    with open(__build_path(path), 'w') as cfg_file:
        cfg_file.write(json.dumps(value))
:param path: example: "/.rwmeta/developer_settings.json" :param value: dict
def start(self, s):
    # type: (Optional[Type[Nonterminal]]) -> None
    """Set start symbol of the grammar.

    :param s: Start symbol to set, or None to unset it.
    :raise NonterminalDoesNotExistsException: If the start symbol is not
        in nonterminals.
    """
    if s is None:
        self._start_symbol = None
        return
    if s not in self.nonterminals:
        raise NonterminalDoesNotExistsException(None, s, self)
    self._start_symbol = s
Set start symbol of the grammar. :param s: Start symbol to set. :raise NonterminalDoesNotExistsException: If the start symbol is not in nonterminals.
def serialize(self, value, entity=None, request=None):
    """Validate and serialize the value (default implementation)."""
    serialized = self.from_python(value)
    self.validate(serialized)
    self.run_validators(value)
    return serialized
Validate and serialize the value. This is the default implementation
def get_composite_reflectivity(self, tower_id, background='#000000', include_legend=True, include_counties=True, include_warnings=True, include_highways=True, include_cities=True, include_rivers=True, include_topography=True): return self._build_radar_image(tower_id, "NCR", background=background, include_legend=include_legend, include_counties=include_counties, include_warnings=include_warnings, include_highways=include_highways, include_cities=include_cities, include_rivers=include_rivers, include_topography=include_topography)
Get the composite reflectivity for a noaa radar site. :param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'. :type tower_id: str :param background: The hex background color. :type background: str :param include_legend: True - include legend. :type include_legend: bool :param include_counties: True - include county lines. :type include_counties: bool :param include_warnings: True - include warning lines. :type include_warnings: bool :param include_highways: True - include highways. :type include_highways: bool :param include_cities: True - include city labels. :type include_cities: bool :param include_rivers: True - include rivers :type include_rivers: bool :param include_topography: True - include topography :type include_topography: bool :rtype: PIL.Image :return: A PIL.Image instance with the Radar composite reflectivity.
def add(self, *terminals):
    # type: (Iterable[Any]) -> None
    """Add terminals into the set, skipping those already present.

    :param terminals: Terminals to insert.
    """
    for term in terminals:
        if term not in self:
            super().add(term)
            # Fresh, empty rule-assignment bucket for the new terminal.
            self._assign_map[term] = set()
Add terminals into the set. :param terminals: Terminals to insert.
def remove(self, *terminals):
    # type: (Iterable[Any]) -> None
    """Remove terminals from the set, along with rules that use them.

    :param terminals: Terminals to remove.
    :raise KeyError: If the object is not in the set.
    """
    for term in set(terminals):
        if term not in self:
            raise KeyError('Terminal ' + str(term) + ' is not inside')
        rules_using_term = self._assign_map[term]
        self._grammar.rules.remove(*rules_using_term, _validate=False)
        del self._assign_map[term]
        super().remove(term)
Remove terminals from the set. Removes also rules using this terminal. :param terminals: Terminals to remove. :raise KeyError if the object is not in the set.
def parse_network(network_fp):
    """Parse a network edge-list file into (graph, gene set).

    The first line is treated as a header and skipped.

    Attribute:
        network_fp (str): File path to a network file.
    """
    graph = nx.Graph()
    gene_set = set()
    with open(network_fp) as in_file:
        in_file.readline()  # Skip header.
        for line in in_file.readlines():
            gene1, gene2 = line.strip().split()
            graph.add_edge(gene1, gene2)
            gene_set.update((gene1, gene2))
    return graph, gene_set
Parses network file and returns a network instance and a gene set. Attribute: network_fp (str): File path to a network file.
def parse_gene_set(gene_set_fp):
    """Parse a gene set file into an ordered (group)-(gene set) dictionary.

    Each line: group name, then tab-separated gene identifiers.

    Attribute:
        gene_set_fp (str): File path to a gene set file.
    """
    group_gene_set_dict = OrderedDict()
    with open(gene_set_fp) as in_file:
        for line in in_file.readlines():
            group, *genes = line.strip().split('\t')
            group_gene_set_dict[group] = set(genes)
    return group_gene_set_dict
Parses gene set file and returns a (group)-(gene set) dictionary. Attribute: gene_set_fp (str): File path to a gene set file.
def parse_gene_expression(gene_expression_fp, mean=False):
    """Parse a gene expression file into an ordered
    (gene identifier)-(log2 expression) dictionary.

    Attribute:
        gene_expression_fp (str): File path to a gene expression file
            (TSV with a header line; first column is the identifier).
        mean (bool): If True, average all expression columns before the
            log2(x + 1) transform (useful when building a reference
            profile); otherwise only the first expression column is used.
    """
    gene_expression_dict = OrderedDict()
    with open(gene_expression_fp) as in_file:
        in_file.readline()  # Skip header.
        for line in in_file.readlines():
            tokens = line.strip().split('\t')
            gene_identifier = tokens[0]
            if mean:
                value = np.mean([float(t) for t in tokens[1:]])
            else:
                value = float(tokens[1])
            gene_expression_dict[gene_identifier] = np.log2(value + 1.0)
    return gene_expression_dict
Parses gene expression file and returns a (gene identifier)-(expression) dictionary. Attribute: gene_expression_fp (str): File path to a gene expression file. mean (bool): When making a normal(reference) gene expression profile, you might use average values of gene expressions for each gene. In this case, pass mean=True.
def side_task(pipe, *side_jobs):
    """Run side-effect functions over a pipeline without altering the data.

    Each item from *pipe* is handed to every function in *side_jobs*,
    then yielded unchanged.

    :param pipe: source iterable.
    :param side_jobs: callables invoked on each item for their side effects.
    """
    # validate the input (original called a sibling `iterable()` helper and
    # then dispatched via `map(pipe, *side_jobs)`, which passes the iterable
    # where a callable belongs; this explicit loop matches the documented
    # contract and keeps the block self-contained)
    assert hasattr(pipe, '__iter__'), 'side_task needs the first argument to be iterable'
    for sj in side_jobs:
        assert callable(sj), 'all side_jobs need to be functions, not {}'.format(sj)

    # run the pipeline, applying every side job to each item
    for item in pipe:
        for sj in side_jobs:
            sj(item)
        yield item
allows you to run a function in a pipeline without affecting the data
def create_return_line_item(cls, return_line_item, **kwargs):
    """Create ReturnLineItem.

    Synchronous by default; pass async=True to get the request thread:
    >>> thread = api.create_return_line_item(return_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
    :return: ReturnLineItem, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches return the helper's result directly,
    # so a single call covers them.
    return cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
Create ReturnLineItem Create a new ReturnLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_return_line_item(return_line_item, async=True) >>> result = thread.get() :param async bool :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required) :return: ReturnLineItem If the method is called asynchronously, returns the request thread.
def delete_return_line_item_by_id(cls, return_line_item_id, **kwargs):
    """Delete ReturnLineItem.

    Delete an instance of ReturnLineItem by its ID. Synchronous by
    default; pass async=True to get the request thread:
    >>> thread = api.delete_return_line_item_by_id(return_line_item_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str return_line_item_id: ID of returnLineItem to delete. (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches return the helper's result directly,
    # so a single call covers them.
    return cls._delete_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
Delete ReturnLineItem Delete an instance of ReturnLineItem by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_return_line_item_by_id(return_line_item_id, async=True) >>> result = thread.get() :param async bool :param str return_line_item_id: ID of returnLineItem to delete. (required) :return: None If the method is called asynchronously, returns the request thread.