<SYSTEM_TASK:> rotates a 2d array to a multiple of 90 deg. <END_TASK> <USER_TASK:> Description: def rotateImage(image, angle): """ rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw """
    image = [list(row) for row in image]
    for n in range(angle % 4):
        image = list(zip(*image[::-1]))
    return image
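A quick usage sketch (assuming rotateImage is importable as defined above); note that the rotated rows come back as tuples because of zip():

    grid = [[1, 2, 3],
            [4, 5, 6]]
    print(rotateImage(grid, 1))  # [(4, 1), (5, 2), (6, 3)] -- 90 deg. cw
    print(rotateImage(grid, 2))  # [(6, 5, 4), (3, 2, 1)]   -- 180 deg.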
<SYSTEM_TASK:> Returns True if sprite is touching any other sprite. <END_TASK> <USER_TASK:> Description: def overlaps(self, canvas, exclude=[]): """ Returns True if sprite is touching any other sprite. """
    try:
        exclude = list(exclude)
    except TypeError:
        exclude = [exclude]
    exclude.append(self)

    for selfY, row in enumerate(self.image.image()):
        for selfX, pixel in enumerate(row):
            canvasPixelOn = canvas.testPixel(
                (selfX + self.position[0], selfY + self.position[1]),
                excludedSprites=exclude
            )
            if pixel and canvasPixelOn:
                return True
    return False
<SYSTEM_TASK:> Returns a list of the sides of the sprite <END_TASK> <USER_TASK:> Description: def onEdge(self, canvas): """ Returns a list of the sides of the sprite which are touching the edge of the canvas. 0 = Bottom 1 = Left 2 = Top 3 = Right """
    sides = []
    if int(self.position[0]) <= 0:
        sides.append(1)
    if (int(self.position[0]) + self.image.width) >= canvas.width:
        sides.append(3)
    if int(self.position[1]) <= 0:
        sides.append(2)
    if (int(self.position[1]) + self.image.height) >= canvas.height:
        sides.append(0)
    return sides
<SYSTEM_TASK:> Property decorator that facilitates writing properties for values from a remote device. <END_TASK> <USER_TASK:> Description: def remote_property(name, get_command, set_command, field_name, doc=None): """Property decorator that facilitates writing properties for values from a remote device. Arguments: name: The field name to use on the local object to store the cached property. get_command: A function that returns the remote value of the property. set_command: A function that accepts a new value for the property and sets it remotely. field_name: The name of the field to retrieve from the response message to get operations. """
    def getter(self):
        try:
            return getattr(self, name)
        except AttributeError:
            value = getattr(self.sendCommand(get_command()), field_name)
            setattr(self, name, value)
            return value

    def setter(self, value):
        setattr(self, name, value)
        self.sendCommand(set_command(value))

    return property(getter, setter, doc=doc)
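A minimal usage sketch of the decorator above; the Device class, the command classes and the field name are hypothetical, only to show how a cached attribute gets wired to remote get/set commands:

    class Device(object):
        # '_speed' is the local cache attribute; 'speed' is the field read from the response.
        speed = remote_property('_speed',
                                lambda: GetSpeedCommand(),     # hypothetical get command
                                lambda v: SetSpeedCommand(v),  # hypothetical set command
                                'speed',
                                doc="Device speed, cached locally after the first read.")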
<SYSTEM_TASK:> Sends a Command object to the MCP2210 and returns its response. <END_TASK> <USER_TASK:> Description: def sendCommand(self, command): """Sends a Command object to the MCP2210 and returns its response. Arguments: A commands.Command instance Returns: A commands.Response instance, or raises a CommandException on error. """
    command_data = [ord(x) for x in buffer(command)]
    self.hid.write(command_data)
    response_data = ''.join(chr(x) for x in self.hid.read(64))
    response = command.RESPONSE.from_buffer_copy(response_data)
    if response.status != 0:
        raise CommandException(response.status)
    return response
<SYSTEM_TASK:> Transfers data over SPI. <END_TASK> <USER_TASK:> Description: def transfer(self, data): """Transfers data over SPI. Arguments: data: The data to transfer. Returns: The data returned by the SPI device. """
    settings = self.transfer_settings
    settings.spi_tx_size = len(data)
    self.transfer_settings = settings

    response = ''
    for i in range(0, len(data), 60):
        response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
        time.sleep(0.01)

    while len(response) < len(data):
        response += self.sendCommand(commands.SPITransferCommand('')).data

    return response
<SYSTEM_TASK:> Limited serialization for shipping plain data. Do not use for models <END_TASK> <USER_TASK:> Description: def render_json_response(self, context_dict, status=200): """ Limited serialization for shipping plain data. Do not use for models or other complex or custom objects. """
    json_context = json.dumps(
        context_dict,
        cls=DjangoJSONEncoder,
        **self.get_json_dumps_kwargs()
    ).encode(u'utf-8')
    return HttpResponse(
        json_context,
        content_type=self.get_content_type(),
        status=status
    )
<SYSTEM_TASK:> If the request is ajax, save the form and return a json response. <END_TASK> <USER_TASK:> Description: def form_valid(self, form): """ If the request is ajax, save the form and return a json response. Otherwise return super as expected. """
    self.object = form.save(commit=False)
    self.pre_save()
    self.object.save()
    if hasattr(form, 'save_m2m'):
        form.save_m2m()
    self.post_save()

    if self.request.is_ajax():
        return self.render_json_response(self.get_success_result())
    return HttpResponseRedirect(self.get_success_url())
<SYSTEM_TASK:> We have errors in the form. If ajax, return them as json. <END_TASK> <USER_TASK:> Description: def form_invalid(self, form): """ We have errors in the form. If ajax, return them as json. Otherwise, proceed as normal. """
    if self.request.is_ajax():
        return self.render_json_response(self.get_error_result(form))
    return super(AjaxFormMixin, self).form_invalid(form)
<SYSTEM_TASK:> Computes a 1D simple moving average along the given axis. <END_TASK> <USER_TASK:> Description: def sma(array, window_size, axis=-1, mode='reflect', **kwargs): """ Computes a 1D simple moving average along the given axis. Parameters ---------- array : ndarray Array on which to perform the convolution. window_size: int Width of the simple moving average window in indices. axis : int, optional Axis along which to perform the moving average mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to ‘constant’. Default is ‘reflect’. kwargs : optional Other arguments to pass to `scipy.ndimage.filters.convolve1d` Returns ------- sma : ndarray Simple moving average of the given array with the specified window size along the requested axis. Raises ------ TypeError: If window_size or axis are not integers. """
    kwargs['axis'] = axis
    kwargs['mode'] = mode
    if not isinstance(window_size, int):
        raise TypeError('window_size must be an integer')
    if not isinstance(kwargs['axis'], int):
        raise TypeError('axis must be an integer')
    return convolve1d(array, np.repeat(1.0, window_size) / window_size, **kwargs)
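A short usage sketch, assuming sma is importable together with numpy:

    import numpy as np

    signal = np.arange(10, dtype=float)     # a simple ramp
    smoothed = sma(signal, window_size=3)
    print(smoothed.shape)                   # (10,) -- same length as the input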
<SYSTEM_TASK:> Builds the URL for elevations API services based on the data given <END_TASK> <USER_TASK:> Description: def build_url(self): """Builds the URL for elevations API services based on the data given by the user. Returns: url (str): URL for the elevations API services """
url = '{protocol}/{url}/{rest}/{version}/{restapi}/{rscpath}/' \ '{query}'.format(protocol=self.schema.protocol, url=self.schema.main_url, rest=self.schema.rest, version=self.schema.version, restapi=self.schema.restApi, rscpath=self.schema.resourcePath, query=self.schema.query) return url.replace('/None/', '/')
<SYSTEM_TASK:> Retrieves zoomlevel from the output response <END_TASK> <USER_TASK:> Description: def zoomlevel(self): """Retrieves zoomlevel from the output response Returns: zoomlevel (namedtuple): A namedtuple of zoomlevel from the output response """
    resources = self.get_resource()
    zoomlevel = namedtuple('zoomlevel', 'zoomLevel')
    try:
        return [zoomlevel(resource['zoomLevel']) for resource in resources]
    except TypeError:
        try:
            if isinstance(resources['ElevationData'], dict):
                return zoomlevel(resources['ElevationData']['ZoomLevel'])
        except KeyError:
            try:
                if isinstance(resources['SeaLevelData'], dict):
                    zoom = resources['SeaLevelData']['ZoomLevel']
                    return zoomlevel(zoom)
            except KeyError:
                print(KeyError)
<SYSTEM_TASK:> Writes output to a JSON file with the given file name <END_TASK> <USER_TASK:> Description: def to_json_file(self, path, file_name=None): """Writes output to a JSON file with the given file name"""
if bool(path) and os.path.isdir(path): self.write_to_json(path, file_name) else: self.write_to_json(os.getcwd(), file_name)
<SYSTEM_TASK:> Gets data from the built url <END_TASK> <USER_TASK:> Description: def get_data(self): """Gets data from the built url"""
    url = self.build_url()
    self.locationApiData = requests.get(url)
    if self.locationApiData.status_code != 200:
        # raise_for_status() itself raises an HTTPError describing the failure
        self.locationApiData.raise_for_status()
<SYSTEM_TASK:> Updates the compaction options for the given model if necessary. <END_TASK> <USER_TASK:> Description: def update_compaction(model): """Updates the compaction options for the given model if necessary. :param model: The model to update. :return: `True`, if the compaction options were modified in Cassandra, `False` otherwise. :rtype: bool """
logger.debug("Checking %s for compaction differences", model) table = get_table_settings(model) existing_options = table.options.copy() existing_compaction_strategy = existing_options['compaction_strategy_class'] existing_options = json.loads(existing_options['compaction_strategy_options']) desired_options = get_compaction_options(model) desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy) desired_options.pop('class', None) do_update = False if desired_compact_strategy not in existing_compaction_strategy: do_update = True for k, v in desired_options.items(): val = existing_options.pop(k, None) if val != v: do_update = True # check compaction_strategy_options if do_update: options = get_compaction_options(model) # jsonify options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) logger.debug(query) execute(query) return True return False
<SYSTEM_TASK:> Records the hosts and connects to one of them <END_TASK> <USER_TASK:> Description: def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, retry_connect=False, **kwargs): """ Records the hosts and connects to one of them :param hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html :type hosts: list :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level :type consistency: int :param lazy_connect: True if should not connect until first use :type lazy_connect: bool :param retry_connect: bool :param retry_connect: True if we should retry to connect even if there was a connection failure initially """
global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") if not default_keyspace: raise UndefinedKeyspaceException() from cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency if lazy_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) return cluster = Cluster(hosts, **kwargs) try: session = cluster.connect() except NoHostAvailable: if retry_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) raise session.row_factory = dict_factory
<SYSTEM_TASK:> Returns a cleaned and validated value. Raises a ValidationError <END_TASK> <USER_TASK:> Description: def validate(self, value): """ Returns a cleaned and validated value. Raises a ValidationError if there's a problem """
if value is None: if self.required: raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field)) return value
<SYSTEM_TASK:> generates a UUID for a given datetime <END_TASK> <USER_TASK:> Description: def from_datetime(self, dt): """ generates a UUID for a given datetime :param dt: datetime :type dt: datetime :return: """
global _last_timestamp epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 timestamp = (dt - epoch).total_seconds() - offset node = None clock_seq = None nanoseconds = int(timestamp * 1e9) timestamp = int(nanoseconds // 100) + 0x01b21dd213814000 if clock_seq is None: import random clock_seq = random.randrange(1 << 14) # instead of stable storage time_low = timestamp & 0xffffffff time_mid = (timestamp >> 32) & 0xffff time_hi_version = (timestamp >> 48) & 0x0fff clock_seq_low = clock_seq & 0xff clock_seq_hi_variant = (clock_seq >> 8) & 0x3f if node is None: node = getnode() return pyUUID(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node), version=1)
<SYSTEM_TASK:> Called by the save function to check if this should be <END_TASK> <USER_TASK:> Description: def _can_update(self): """ Called by the save function to check if this should be persisted with update or insert :return: """
    if not self._is_persisted:
        return False
    # an update is only possible if none of the primary key values have changed
    return all(not self._values[k].changed for k in self._primary_keys)
<SYSTEM_TASK:> Deletes this instance <END_TASK> <USER_TASK:> Description: def delete(self): """ Deletes this instance """
self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__, timeout=self._timeout).delete()
<SYSTEM_TASK:> Adds WHERE arguments to the queryset, returning a new queryset <END_TASK> <USER_TASK:> Description: def filter(self, *args, **kwargs): """ Adds WHERE arguments to the queryset, returning a new queryset #TODO: show examples :rtype: AbstractQuerySet """
#add arguments to the where clause filters if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on filter are not allowed") clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, WhereClause): raise QueryException('{} is not a valid query operator'.format(operator)) clone._where.append(operator) for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) quote_field = True #resolve column and operator try: column = self.model._get_column(col_name) except KeyError: if col_name == 'pk__token': if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) quote_field = False else: raise QueryException("Can't resolve column name: '{}'".format(col_name)) if isinstance(val, Token): if col_name != 'pk__token': raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( 'Token() received {} arguments but model has {} partition keys'.format( len(val.value), len(partition_columns))) val.set_columns(partition_columns) #get query operator, or use equals if not supplied operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() if isinstance(operator, InOperator): if not isinstance(val, (list, tuple)): raise QueryException('IN queries must use a list/tuple value') query_val = [column.to_database(v) for v in val] elif isinstance(val, BaseQueryFunction): query_val = val else: query_val = column.to_database(val) clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) return clone
<SYSTEM_TASK:> orders the result set. <END_TASK> <USER_TASK:> Description: def order_by(self, *colnames): """ orders the result set. ordering can only use clustering columns. Default order is ascending, prepend a '-' to the column name for descending """
if len(colnames) == 0: clone = copy.deepcopy(self) clone._order = [] return clone conditions = [] for colname in colnames: conditions.append('"{}" {}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) return clone
<SYSTEM_TASK:> Returns the number of rows matched by this query <END_TASK> <USER_TASK:> Description: def count(self): """ Returns the number of rows matched by this query """
if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: query = self._select_query() query.count = True result = self._execute(query) return result[0]['count'] else: return len(self._result_cache)
<SYSTEM_TASK:> Sets the limit on the number of results returned <END_TASK> <USER_TASK:> Description: def limit(self, v): """ Sets the limit on the number of results returned CQL has a default limit of 10,000 """
if not (v is None or isinstance(v, six.integer_types)): raise TypeError if v == self._limit: return self if v < 0: raise QueryException("Negative limit is not allowed") clone = copy.deepcopy(self) clone._limit = v return clone
<SYSTEM_TASK:> Updates the rows in this queryset <END_TASK> <USER_TASK:> Description: def update(self, **values): """ Updates the rows in this queryset """
if not values: return nulled_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp, transactions=self._transaction) for name, val in values.items(): col_name, col_op = self._parse_filter_arg(name) col = self.model._columns.get(col_name) # check for nonexistant columns if col is None: raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.model.__name__, col_name)) # check for primary key update attempts if col.is_primary_key: raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(col_name, self.__module__, self.model.__name__)) # we should not provide default values in this use case. val = col.validate(val) if val is None: nulled_columns.add(col_name) continue # add the update statements if isinstance(col, Counter): # TODO: implement counter updates raise NotImplementedError elif isinstance(col, (List, Set, Map)): if isinstance(col, List): klass = ListUpdateClause elif isinstance(col, Set): klass = SetUpdateClause elif isinstance(col, Map): klass = MapUpdateClause else: raise RuntimeError us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op)) else: us.add_assignment_clause(AssignmentClause( col_name, col.to_database(val))) if us.assignments: self._execute(us) if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where) self._execute(ds)
<SYSTEM_TASK:> Handle format request <END_TASK> <USER_TASK:> Description: def handle(client, request): """ Handle format request request struct: { 'data': 'data_need_format', 'formaters': [ { 'name': 'formater_name', 'config': {} # None or dict }, ... # formaters ] } if no formaters, use autopep8 formater and it's default config """
formaters = request.get('formaters', None) if not formaters: formaters = [{'name': 'autopep8'}] logging.debug('formaters: ' + json.dumps(formaters, indent=4)) data = request.get('data', None) if not isinstance(data, str): return send(client, 'invalid data', None) max_line_length = None for formater in formaters: max_line_length = formater.get('config', {}).get('max_line_length') if max_line_length: break for formater in formaters: name = formater.get('name', None) config = formater.get('config', {}) if name not in FORMATERS: return send(client, 'formater {} not support'.format(name), None) formater = FORMATERS[name] if formater is None: return send(client, 'formater {} not installed'.format(name), None) if name == 'isort' and max_line_length: config.setdefault('line_length', max_line_length) data = formater(data, **config) return send(client, None, data)
<SYSTEM_TASK:> A quadratic tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutQuad(n): """A quadratic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    if n < 0.5:
        return 2 * n**2
    else:
        n = n * 2 - 1
        return -0.5 * (n*(n-2) - 1)
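A quick check of the accelerate/decelerate shape, assuming the function is importable:

    for t in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(t, easeInOutQuad(t))   # 0.0, 0.125, 0.5, 0.875, 1.0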
<SYSTEM_TASK:> A cubic tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutCubic(n): """A cubic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**3
    else:
        n = n - 2
        return 0.5 * (n**3 + 2)
<SYSTEM_TASK:> A quartic tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutQuart(n): """A quartic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**4
    else:
        n = n - 2
        return -0.5 * (n**4 - 2)
<SYSTEM_TASK:> A quintic tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutQuint(n): """A quintic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**5
    else:
        n = n - 2
        return 0.5 * (n**5 + 2)
<SYSTEM_TASK:> An exponential tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutExpo(n): """An exponential tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        n = n * 2
        if n < 1:
            return 0.5 * 2**(10 * (n - 1))
        else:
            n -= 1
            return 0.5 * (-1 * (2 ** (-10 * n)) + 2)
<SYSTEM_TASK:> A circular tween function that accelerates, reaches the midpoint, and then decelerates. <END_TASK> <USER_TASK:> Description: def easeInOutCirc(n): """A circular tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = n * 2
    if n < 1:
        return -0.5 * (math.sqrt(1 - n**2) - 1)
    else:
        n = n - 2
        return 0.5 * (math.sqrt(1 - n**2) + 1)
<SYSTEM_TASK:> An elastic tween function that begins with an increasing wobble and then snaps into the destination. <END_TASK> <USER_TASK:> Description: def easeInElastic(n, amplitude=1, period=0.3): """An elastic tween function that begins with an increasing wobble and then snaps into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    return 1 - easeOutElastic(1 - n, amplitude=amplitude, period=period)
<SYSTEM_TASK:> An elastic tween function that overshoots the destination and then "rubber bands" into the destination. <END_TASK> <USER_TASK:> Description: def easeOutElastic(n, amplitude=1, period=0.3): """An elastic tween function that overshoots the destination and then "rubber bands" into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    if amplitude < 1:
        amplitude = 1
        s = period / 4
    else:
        s = period / (2 * math.pi) * math.asin(1 / amplitude)
    return amplitude * 2**(-10*n) * math.sin((n-s) * (2*math.pi / period)) + 1
<SYSTEM_TASK:> An elastic tween function wobbles towards the midpoint. <END_TASK> <USER_TASK:> Description: def easeInOutElastic(n, amplitude=1, period=0.5): """An elastic tween function wobbles towards the midpoint. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n *= 2
    if n < 1:
        return easeInElastic(n, amplitude=amplitude, period=period) / 2
    else:
        return easeOutElastic(n-1, amplitude=amplitude, period=period) / 2 + 0.5
<SYSTEM_TASK:> A tween function that backs up first at the start and then goes to the destination. <END_TASK> <USER_TASK:> Description: def easeInBack(n, s=1.70158): """A tween function that backs up first at the start and then goes to the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    return n * n * ((s + 1) * n - s)
<SYSTEM_TASK:> A tween function that overshoots the destination a little and then backs into the destination. <END_TASK> <USER_TASK:> Description: def easeOutBack(n, s=1.70158): """A tween function that overshoots the destination a little and then backs into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = n - 1
    return n * n * ((s + 1) * n + s) + 1
<SYSTEM_TASK:> A "back-in" tween function that overshoots both the start and destination. <END_TASK> <USER_TASK:> Description: def easeInOutBack(n, s=1.70158): """A "back-in" tween function that overshoots both the start and destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    n = n * 2
    if n < 1:
        s *= 1.525
        return 0.5 * (n * n * ((s + 1) * n - s))
    else:
        n -= 2
        s *= 1.525
        return 0.5 * (n * n * ((s + 1) * n + s) + 2)
<SYSTEM_TASK:> A bouncing tween function that hits the destination and then bounces to rest. <END_TASK> <USER_TASK:> Description: def easeOutBounce(n): """A bouncing tween function that hits the destination and then bounces to rest. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """
    _checkRange(n)
    if n < (1/2.75):
        return 7.5625 * n * n
    elif n < (2/2.75):
        n -= (1.5/2.75)
        return 7.5625 * n * n + 0.75
    elif n < (2.5/2.75):
        n -= (2.25/2.75)
        return 7.5625 * n * n + 0.9375
    else:
        n -= (2.65/2.75)
        return 7.5625 * n * n + 0.984375
<SYSTEM_TASK:> Find all objects related to instances of ``queryset`` that should also be deleted. <END_TASK> <USER_TASK:> Description: def get_deleted_objects(self, request, queryset): """ Find all objects related to instances of ``queryset`` that should also be deleted. Returns - to_delete - a nested list of strings suitable for display in the template with the ``unordered_list`` filter. - model_count - statistics for models of all deleted instances - perms_needed - list of names for all instances which can not be deleted because of not enough rights - protected - list of names for all objects protected for deletion because of reference type """
collector = NestedObjects(using=queryset.db) collector.collect(queryset) model_perms_needed = set() object_perms_needed = set() STRONG_DELETION_CONTROL = getattr(settings, 'ACCESS_STRONG_DELETION_CONTROL', False) def format_callback(obj): has_admin = obj.__class__ in self.admin_site._registry opts = obj._meta no_edit_link = '%s: %s' % (capfirst(opts.verbose_name), force_text(obj)) # Trying to get admin change URL admin_url = None try: admin_url = reverse('%s:%s_%s_change' % (self.admin_site.name, opts.app_label, opts.model_name), None, (quote(obj._get_pk_val()),)) except NoReverseMatch: # Change url doesn't exist -- don't display link to edit pass # Collecting forbidden subobjects, compatible with Django or forced by the option if STRONG_DELETION_CONTROL or has_admin: if not obj.__class__._meta.auto_created: manager = AccessManager(obj.__class__) # filter out forbidden items if manager.check_deleteable(obj.__class__, request) is False: model_perms_needed.add(opts.verbose_name) if not manager.apply_deleteable(obj.__class__._default_manager.filter(pk=obj.pk), request): object_perms_needed.add(obj) if admin_url: # Display a link to the admin page. return format_html('{}: <a href="{}">{}</a>', capfirst(opts.verbose_name), admin_url, obj) else: # Don't display link to edit, because it either has no # admin or is edited inline. return no_edit_link to_delete = collector.nested(format_callback) protected = [format_callback(obj) for obj in collector.protected] protected = set([format_callback(obj) for obj in object_perms_needed]).union(protected) model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()} return to_delete, model_count, model_perms_needed, protected
<SYSTEM_TASK:> Returns plugin or plugin point class from given ``plugin_name`` string. <END_TASK> <USER_TASK:> Description: def get_plugin_from_string(plugin_name): """ Returns plugin or plugin point class from given ``plugin_name`` string. Example of ``plugin_name``:: 'my_app.MyPlugin' """
    modulename, classname = plugin_name.rsplit('.', 1)
    module = import_module(modulename)
    return getattr(module, classname)
<SYSTEM_TASK:> Generate a distribution's Probability Distribution Function <END_TASK> <USER_TASK:> Description: def make_pdf(dist, params, size=10000): """Generate a distribution's Probability Distribution Function """
    # Separate parts of parameters
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]

    # Get sane start and end points of distribution
    start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
    end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)

    # Build PDF and turn into pandas Series
    x = np.linspace(start, end, size)
    y = dist.pdf(x, loc=loc, scale=scale, *arg)
    pdf = pd.Series(y, x)

    return pdf
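A usage sketch with scipy.stats, assuming make_pdf is importable; the normal distribution and the sample data are illustrative only:

    import numpy as np
    import scipy.stats as st

    data = np.random.normal(loc=5.0, scale=2.0, size=1000)
    params = st.norm.fit(data)          # (loc, scale) for the fitted normal
    pdf = make_pdf(st.norm, params)     # pandas Series indexed by x
    print(pdf.idxmax())                 # peak of the PDF, close to loc ~= 5.0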
<SYSTEM_TASK:> Correctly convert the given query and parameters into a full query+query <END_TASK> <USER_TASK:> Description: def urlencode(query, params): """ Correctly convert the given query and parameters into a full query+query string, ensuring the order of the params. """
return query + '?' + "&".join(key+'='+quote_plus(str(value)) for key, value in params)
<SYSTEM_TASK:> Returns model instance of plugin point or plugin, depending on which <END_TASK> <USER_TASK:> Description: def get_model(cls, name=None, status=ENABLED): """ Returns model instance of plugin point or plugin, depending on which class this method is called from. Example:: plugin_model_instance = MyPlugin.get_model() plugin_model_instance = MyPluginPoint.get_model('plugin-name') plugin_point_model_instance = MyPluginPoint.get_model() """
ppath = cls.get_pythonpath() if is_plugin_point(cls): if name is not None: kwargs = {} if status is not None: kwargs['status'] = status return Plugin.objects.get(point__pythonpath=ppath, name=name, **kwargs) else: return PluginPointModel.objects.get(pythonpath=ppath) else: return Plugin.objects.get(pythonpath=ppath)
<SYSTEM_TASK:> Returns plugin point model instance. Only used from plugin classes. <END_TASK> <USER_TASK:> Description: def get_point_model(cls): """ Returns plugin point model instance. Only used from plugin classes. """
if is_plugin_point(cls): raise Exception(_('This method is only available to plugin ' 'classes.')) else: return PluginPointModel.objects.\ get(plugin__pythonpath=cls.get_pythonpath())
<SYSTEM_TASK:> Returns all plugin instances of plugin point, passing all args and <END_TASK> <USER_TASK:> Description: def get_plugins(cls): """ Returns all plugin instances of plugin point, passing all args and kwargs to plugin constructor. """
    # Django >= 1.9 changed something with the migration logic causing
    # plugins to be executed before the corresponding database tables
    # exist. This method will only return something if the database
    # tables have already been created.
    # XXX: I don't fully understand the issue and there should be
    # another way but this appears to work fine.
    if django_version >= (1, 9) and \
            not db_table_exists(Plugin._meta.db_table):
        # a bare return ends the generator; raising StopIteration inside a
        # generator is an error under PEP 479 (Python 3.7+)
        return

    if is_plugin_point(cls):
        for plugin_model in cls.get_plugins_qs():
            yield plugin_model.get_plugin()
    else:
        raise Exception(_('This method is only available to plugin point '
                          'classes.'))
<SYSTEM_TASK:> Coerce argument to unicode, if it's not already. <END_TASK> <USER_TASK:> Description: def safeunicode(arg, *args, **kwargs): """Coerce argument to unicode, if it's not already."""
return arg if isinstance(arg, unicode) else unicode(arg, *args, **kwargs)
<SYSTEM_TASK:> Returns energy data from 1960 to 2014 across various factors. <END_TASK> <USER_TASK:> Description: def get_reports(): """ Returns energy data from 1960 to 2014 across various factors. """
if False: # If there was a Test version of this method, it would go here. But alas. pass else: rows = _Constants._DATABASE.execute("SELECT data FROM energy".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data)
<SYSTEM_TASK:> Iterate over all registered plugins or plugin points and prepare to add <END_TASK> <USER_TASK:> Description: def available(self, src, dst, model): """ Iterate over all registered plugins or plugin points and prepare to add them to database. """
for name, point in six.iteritems(src): inst = dst.pop(name, None) if inst is None: self.print_(1, "Registering %s for %s" % (model.__name__, name)) inst = model(pythonpath=name) if inst.status == REMOVED: self.print_(1, "Updating %s for %s" % (model.__name__, name)) # re-enable a previously removed plugin point and its plugins inst.status = ENABLED yield point, inst
<SYSTEM_TASK:> Mark all missing plugins, that exists in database, but are not <END_TASK> <USER_TASK:> Description: def missing(self, dst): """ Mark all missing plugins, that exists in database, but are not registered. """
for inst in six.itervalues(dst): if inst.status != REMOVED: inst.status = REMOVED inst.save()
<SYSTEM_TASK:> Synchronize all registered plugins and plugin points to database. <END_TASK> <USER_TASK:> Description: def all(self): """ Synchronize all registered plugins and plugin points to database. """
# Django >= 1.9 changed something with the migration logic causing # plugins to be executed before the corresponding database tables # exist. This method will only return something if the database # tables have already been created. # XXX: I don't fully understand the issue and there should be # another way but this appears to work fine. if django_version >= (1, 9) and ( not db_table_exists(Plugin._meta.db_table) or not db_table_exists(PluginPoint._meta.db_table)): return self.points()
<SYSTEM_TASK:> Returns weather reports from the dataset. <END_TASK> <USER_TASK:> Description: def get_weather(test=False): """ Returns weather reports from the dataset. """
if _Constants._TEST or test: rows = _Constants._DATABASE.execute("SELECT data FROM weather LIMIT {hardware}".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data) else: rows = _Constants._DATABASE.execute("SELECT data FROM weather".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data)
<SYSTEM_TASK:> Return a string describing the probable encoding of a file. <END_TASK> <USER_TASK:> Description: def description_of(file, name='stdin'): """Return a string describing the probable encoding of a file."""
    u = UniversalDetector()
    for line in file:
        u.feed(line)
    u.close()
    result = u.result
    if result['encoding']:
        return '%s: %s with confidence %s' % (name, result['encoding'],
                                              result['confidence'])
    else:
        return '%s: no result' % name
<SYSTEM_TASK:> Decorator that wraps instance methods to prepend the instance's filename <END_TASK> <USER_TASK:> Description: def prepend_name_prefix(func): """ Decorator that wraps instance methods to prepend the instance's filename prefix to the beginning of the referenced filename. Must only be used on instance methods where the first parameter after `self` is `name` or a comparable parameter of a different name. """
    @wraps(func)
    def prepend_prefix(self, name, *args, **kwargs):
        name = self.name_prefix + name
        return func(self, name, *args, **kwargs)
    return prepend_prefix
<SYSTEM_TASK:> Gets the appropriate piece of chalk for the logging level <END_TASK> <USER_TASK:> Description: def get_chalk(level): """Gets the appropriate piece of chalk for the logging level """
    if level >= logging.ERROR:
        _chalk = chalk.red
    elif level >= logging.WARNING:
        _chalk = chalk.yellow
    elif level >= logging.INFO:
        _chalk = chalk.blue
    elif level >= logging.DEBUG:
        _chalk = chalk.green
    else:
        _chalk = chalk.white
    return _chalk
<SYSTEM_TASK:> Attempts to convert given object to a string object <END_TASK> <USER_TASK:> Description: def to_str(obj): """Attempts to convert given object to a string object """
    if not isinstance(obj, str) and PY3 and isinstance(obj, bytes):
        obj = obj.decode('utf-8')
    return obj if isinstance(obj, string_types) else str(obj)
<SYSTEM_TASK:> Helper method to validate and map values used in the instantiation of <END_TASK> <USER_TASK:> Description: def get_color(self, value): """Helper method to validate and map values used in the instantiation of of the Color object to the correct unicode value. """
if value in COLOR_SET: value = COLOR_MAP[value] else: try: value = int(value) if value >= 8: raise ValueError() except ValueError as exc: raise ValueError( 'Colors should either a member of: {} or a positive ' 'integer below 8'.format(', '.join(COLOR_NAMES)) ) return '{}{}'.format(self.PREFIX, value)
<SYSTEM_TASK:> This method creates a new shuffled file. <END_TASK> <USER_TASK:> Description: def shuffle(self, overwrite=False): """ This method creates a new shuffled file. """
if overwrite: shuffled = self.path else: shuffled = FileAPI.add_ext_name(self.path, "_shuffled") lines = open(self.path).readlines() random.shuffle(lines) open(shuffled, "w").writelines(lines) self.path = shuffled
<SYSTEM_TASK:> Use count_reads on multiple files and store result in dict. <END_TASK> <USER_TASK:> Description: def multiple_files_count_reads_in_windows(bed_files, args): # type: (Iterable[str], Namespace) -> OrderedDict[str, List[pd.DataFrame]] """Use count_reads on multiple files and store result in dict. Untested since does the same thing as count reads."""
bed_windows = OrderedDict() # type: OrderedDict[str, List[pd.DataFrame]] for bed_file in bed_files: logging.info("Binning " + bed_file) if ".bedpe" in bed_file: chromosome_dfs = count_reads_in_windows_paired_end(bed_file, args) else: chromosome_dfs = count_reads_in_windows(bed_file, args) bed_windows[bed_file] = chromosome_dfs return bed_windows
<SYSTEM_TASK:> Merge lists of chromosome bin df chromosome-wise. <END_TASK> <USER_TASK:> Description: def _merge_files(windows, nb_cpu): # type: (Iterable[pd.DataFrame], int) -> pd.DataFrame """Merge lists of chromosome bin df chromosome-wise. windows is an OrderedDict where the keys are files, the values are lists of dfs, one per chromosome. Returns a list of dataframes, one per chromosome, with the collective count per bin for all files. TODO: is it faster to merge all in one command? """
# windows is a list of chromosome dfs per file windows = iter(windows) # can iterate over because it is odict_values merged = next(windows) # if there is only one file, the merging is skipped since the windows is used up for chromosome_dfs in windows: # merge_same_files merges the chromosome files in parallel merged = merge_same_files(merged, chromosome_dfs, nb_cpu) return merged
<SYSTEM_TASK:> Round values as in Python 2, for Python 3 compatibility. <END_TASK> <USER_TASK:> Description: def py2round(value): """Round values as in Python 2, for Python 3 compatibility. All x.5 values are rounded away from zero. In Python 3, this has changed to avoid bias: when x is even, rounding is towards zero, when x is odd, rounding is away from zero. Thus, in Python 3, round(2.5) results in 2, round(3.5) is 4. Python 3 also returns an int; Python 2 returns a float. """
    if value > 0:
        return float(floor(float(value) + 0.5))
    else:
        return float(ceil(float(value) - 0.5))
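A small comparison against Python 3's built-in banker's rounding, assuming py2round is importable:

    print(round(2.5), py2round(2.5))     # 2 3.0
    print(round(-2.5), py2round(-2.5))   # -2 -3.0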
<SYSTEM_TASK:> Convert equivalent discrete intervals to different representations. <END_TASK> <USER_TASK:> Description: def canonicalize(interval, lower_inc=True, upper_inc=False): """ Convert equivalent discrete intervals to different representations. """
if not interval.discrete: raise TypeError('Only discrete ranges can be canonicalized') if interval.empty: return interval lower, lower_inc = canonicalize_lower(interval, lower_inc) upper, upper_inc = canonicalize_upper(interval, upper_inc) return interval.__class__( [lower, upper], lower_inc=lower_inc, upper_inc=upper_inc, )
<SYSTEM_TASK:> Return the greatest lower bound for given intervals. <END_TASK> <USER_TASK:> Description: def glb(self, other): """ Return the greatest lower bound for given intervals. :param other: AbstractInterval instance """
return self.__class__( [ min(self.lower, other.lower), min(self.upper, other.upper) ], lower_inc=self.lower_inc if self < other else other.lower_inc, upper_inc=self.upper_inc if self > other else other.upper_inc, )
<SYSTEM_TASK:> Return the least upper bound for given intervals. <END_TASK> <USER_TASK:> Description: def lub(self, other): """ Return the least upper bound for given intervals. :param other: AbstractInterval instance """
return self.__class__( [ max(self.lower, other.lower), max(self.upper, other.upper), ], lower_inc=self.lower_inc if self < other else other.lower_inc, upper_inc=self.upper_inc if self > other else other.upper_inc, )
<SYSTEM_TASK:> Computes the minimum number of tags required in window for an island to be enriched. <END_TASK> <USER_TASK:> Description: def compute_enriched_threshold(average_window_readcount): # type: (float) -> int """ Computes the minimum number of tags required in window for an island to be enriched. """
    current_threshold, survival_function = 0, 1
    for current_threshold in count(start=0, step=1):
        survival_function -= poisson.pmf(current_threshold,
                                         average_window_readcount)
        if survival_function <= WINDOW_P_VALUE:
            break

    island_enriched_threshold = current_threshold + 1

    return island_enriched_threshold
<SYSTEM_TASK:> Computes the log-factorial directly for tractable numbers, uses Ramanujan's approximation otherwise. <END_TASK> <USER_TASK:> Description: def _factln(num): # type: (int) -> float """ Computes the log-factorial directly for tractable numbers, uses Ramanujan's approximation otherwise. """
    if num < 20:
        log_factorial = log(factorial(num))
    else:
        log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
            1 + 2 * num))) / 6.0 + log(pi) / 2

    return log_factorial
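As a rough sanity check, math.lgamma(n + 1) equals ln(n!) exactly and can be compared against the approximation (a usage sketch, assuming _factln is importable):

    from math import lgamma

    for n in (5, 50, 500):
        print(n, _factln(n), lgamma(n + 1))   # the two values agree closely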
<SYSTEM_TASK:> Add enriched bins based on bed files. <END_TASK> <USER_TASK:> Description: def add_new_enriched_bins_matrixes(region_files, dfs, bin_size): """Add enriched bins based on bed files. There is no way to find the correspondence between region file and matrix file, but it does not matter."""
dfs = _remove_epic_enriched(dfs) names = ["Enriched_" + os.path.basename(r) for r in region_files] regions = region_files_to_bins(region_files, names, bin_size) new_dfs = OrderedDict() assert len(regions.columns) == len(dfs) for region, (n, df) in zip(regions, dfs.items()): region_col = regions[region] df = df.join(region_col, how="outer").fillna(0) new_dfs[n] = df return new_dfs
<SYSTEM_TASK:> Merges data from the two strands into strand-agnostic counts. <END_TASK> <USER_TASK:> Description: def merge_chromosome_dfs(df_tuple): # type: (Tuple[pd.DataFrame, pd.DataFrame]) -> pd.DataFrame """Merges data from the two strands into strand-agnostic counts."""
plus_df, minus_df = df_tuple index_cols = "Chromosome Bin".split() count_column = plus_df.columns[0] if plus_df.empty: return return_other(minus_df, count_column, index_cols) if minus_df.empty: return return_other(plus_df, count_column, index_cols) # sum duplicate bins # TODO: why are there duplicate bins here in the first place? plus_df = plus_df.groupby(index_cols).sum() minus_df = minus_df.groupby(index_cols).sum() # first sum the two bins from each strand df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1) df = df.reset_index().sort_values(by="Bin") df.columns = ["Chromosome", "Bin", count_column] df = df.sort_values(["Chromosome", "Bin"]) df[["Bin", count_column]] = df[["Bin", count_column]].astype(int32) df = df[[count_column, "Chromosome", "Bin"]] return df.reset_index(drop=True)
<SYSTEM_TASK:> Can probably be heavily optimized. <END_TASK> <USER_TASK:> Description: def add_to_island_expectations_dict(average_window_readcount, current_max_scaled_score, island_eligibility_threshold, island_expectations, gap_contribution): # type: ( float, int, float, Dict[int, float], float) -> Dict[int, float] """Can probably be heavily optimized. Time required to run can be seen from logging info."""
scaled_score = current_max_scaled_score + E_VALUE for index in range(current_max_scaled_score + 1, scaled_score + 1): island_expectation = 0.0 i = island_eligibility_threshold #i is the number of tags in the added window current_island = int(round(index - compute_window_score( i, average_window_readcount) / BIN_SIZE)) while (current_island >= 0): if current_island in island_expectations: island_expectation += _poisson( i, average_window_readcount) * island_expectations[ current_island] i += 1 current_island = int(round(index - compute_window_score( i, average_window_readcount) / BIN_SIZE)) island_expectation *= gap_contribution if island_expectation: island_expectations[index] = island_expectation return island_expectations
<SYSTEM_TASK:> Finds the enriched bins in a df. <END_TASK> <USER_TASK:> Description: def get_island_bins(df, window_size, genome, args): # type: (pd.DataFrame, int, str, Namespace) -> Dict[str, Set[int]] """Finds the enriched bins in a df."""
# need these chromos because the df might not have islands in all chromos chromosomes = natsorted(list(args.chromosome_sizes)) chromosome_island_bins = {} # type: Dict[str, Set[int]] df_copy = df.reset_index(drop=False) for chromosome in chromosomes: cdf = df_copy.loc[df_copy.Chromosome == chromosome] if cdf.empty: chromosome_island_bins[chromosome] = set() else: island_starts_ends = zip(cdf.Start.values.tolist(), cdf.End.values.tolist()) island_bins = chain(*[range( int(start), int(end), window_size) for start, end in island_starts_ends]) chromosome_island_bins[chromosome] = set(island_bins) return chromosome_island_bins
<SYSTEM_TASK:> Creates genome size dict from string containing data. <END_TASK> <USER_TASK:> Description: def create_genome_size_dict(genome): # type: (str) -> Dict[str,int] """Creates genome size dict from string containing data."""
size_file = get_genome_size_file(genome) size_lines = open(size_file).readlines() size_dict = {} for line in size_lines: genome, length = line.split() size_dict[genome] = int(length) return size_dict
<SYSTEM_TASK:> What does island_expectations do? <END_TASK> <USER_TASK:> Description: def compute_score_threshold(average_window_readcount, island_enriched_threshold, gap_contribution, boundary_contribution, genome_length_in_bins): # type: (float, int, float, float, float) -> float """ What does island_expectations do? """
required_p_value = poisson.pmf(island_enriched_threshold, average_window_readcount) prob = boundary_contribution * required_p_value score = -log(required_p_value) current_scaled_score = int(round(score / BIN_SIZE)) island_expectations_d = {} # type: Dict[int, float] island_expectations_d[current_scaled_score] = prob * genome_length_in_bins island_expectations_d[ 0] = boundary_contribution * genome_length_in_bins / gap_contribution current_max_scaled_score = current_scaled_score interval = int(1 / BIN_SIZE) partial_cumu = 0.0 logging.info("Finding the score required to consider an island enriched.") while (partial_cumu > E_VALUE_THRESHOLD or partial_cumu < 1e-100): current_scaled_score += interval current_max_scaled_score = current_scaled_score - interval # logging.debug(island_expectations_d) if current_scaled_score > current_max_scaled_score: # logging.debug(island_expectations_d) island_expectations_d = add_to_island_expectations_dict( average_window_readcount, current_max_scaled_score, island_enriched_threshold, island_expectations_d, gap_contribution) partial_cumu = 0.0001 current_max_scaled_score += 1000 if max(island_expectations_d) > interval: partial_cumu = sum( [val for idx, val in island_expectations_d.items() if idx > current_max_scaled_score - interval]) else: partial_cumu = sum(island_expectations_d.values()) logging.debug("Computing cumulative distribution.") score_threshold = generate_cumulative_dist(island_expectations_d, current_max_scaled_score + 1) logging.info("Enriched score threshold for islands: " + str( score_threshold)) return score_threshold
<SYSTEM_TASK:> Estimate length of reads based on 10000 first. <END_TASK> <USER_TASK:> Description: def find_readlength(args): # type: (Namespace) -> int """Estimate length of reads based on 10000 first."""
try: bed_file = args.treatment[0] except AttributeError: bed_file = args.infiles[0] filereader = "cat " if bed_file.endswith(".gz") and search("linux", platform, IGNORECASE): filereader = "zcat " elif bed_file.endswith(".gz") and search("darwin", platform, IGNORECASE): filereader = "gzcat " elif bed_file.endswith(".bz2"): filereader = "bzgrep " command = filereader + "{} | head -10000".format(bed_file) output = check_output(command, shell=True) df = pd.read_table( BytesIO(output), header=None, usecols=[1, 2], sep="\t", names=["Start", "End"]) readlengths = df.End - df.Start mean_readlength = readlengths.mean() median_readlength = readlengths.median() max_readlength = readlengths.max() min_readlength = readlengths.min() logging.info(( "Used first 10000 reads of {} to estimate a median read length of {}\n" "Mean readlength: {}, max readlength: {}, min readlength: {}.").format( bed_file, median_readlength, mean_readlength, max_readlength, min_readlength)) return median_readlength
<SYSTEM_TASK:> Find the predefined readlength closest to the estimated readlength. <END_TASK> <USER_TASK:> Description: def get_closest_readlength(estimated_readlength): # type: (int) -> int """Find the predefined readlength closest to the estimated readlength. In the case of a tie, choose the shortest readlength."""
    readlengths = [36, 50, 75, 100]
    differences = [abs(r - estimated_readlength) for r in readlengths]
    min_difference = min(differences)
    index_of_min_difference = [i
                               for i, d in enumerate(differences)
                               if d == min_difference][0]

    return readlengths[index_of_min_difference]
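A couple of illustrative calls, assuming the function is importable:

    print(get_closest_readlength(43))   # 36 -- the tie between 36 and 50 resolves to the shorter length
    print(get_closest_readlength(80))   # 75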
<SYSTEM_TASK:> Parses the supplied output and returns the version string. <END_TASK> <USER_TASK:> Description: def parse_version(output): """ Parses the supplied output and returns the version string. :param output: A string containing the output of running snort. :returns: Version string for the version of snort run. None if not found. """
for x in output.splitlines(): match = VERSION_PATTERN.match(x) if match: return match.group('version').strip() return None
<SYSTEM_TASK:> Parses the supplied output and yields any alerts. <END_TASK> <USER_TASK:> Description: def parse_alert(output): """ Parses the supplied output and yields any alerts. Example alert format: 01/28/14-22:26:04.885446 [**] [1:1917:11] INDICATOR-SCAN UPnP service discover attempt [**] [Classification: Detection of a Network Scan] [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900 :param output: A string containing the output of running snort :returns: Generator of snort alert dicts """
for x in output.splitlines(): match = ALERT_PATTERN.match(x) if match: rec = {'timestamp': datetime.strptime(match.group('timestamp'), '%m/%d/%y-%H:%M:%S.%f'), 'sid': int(match.group('sid')), 'revision': int(match.group('revision')), 'priority': int(match.group('priority')), 'message': match.group('message'), 'source': match.group('src'), 'destination': match.group('dest'), 'protocol': match.group('protocol'), } if match.group('classtype'): rec['classtype'] = match.group('classtype') yield rec
<SYSTEM_TASK:> Runs snort against the supplied pcap. <END_TASK> <USER_TASK:> Description: def run(self, pcap): """ Runs snort against the supplied pcap. :param pcap: Filepath to pcap file to scan :returns: tuple of version, list of alerts """
proc = Popen(self._snort_cmd(pcap), stdout=PIPE, stderr=PIPE, universal_newlines=True) stdout, stderr = proc.communicate() if proc.returncode != 0: raise Exception("\n".join(["Execution failed return code: {0}" \ .format(proc.returncode), stderr or ""])) return (parse_version(stderr), [ x for x in parse_alert(stdout) ])
<SYSTEM_TASK:> Runs suricata against the supplied pcap. <END_TASK> <USER_TASK:> Description: def run(self, pcap): """ Runs suricata against the supplied pcap. :param pcap: Filepath to pcap file to scan :returns: tuple of version, list of alerts """
tmpdir = None try: tmpdir = tempfile.mkdtemp(prefix='tmpsuri') proc = Popen(self._suri_cmd(pcap, tmpdir), stdout=PIPE, stderr=PIPE, universal_newlines=True) stdout, stderr = proc.communicate() if proc.returncode != 0: raise Exception("\n".join(["Execution failed return code: {0}" \ .format(proc.returncode), stderr or ""])) with open(os.path.join(tmpdir, 'fast.log')) as tmp: return (parse_version(stdout), [ x for x in parse_alert(tmp.read()) ]) finally: if tmpdir: shutil.rmtree(tmpdir)
<SYSTEM_TASK:> Run IDS across the supplied file. <END_TASK> <USER_TASK:> Description: def analyse_pcap(infile, filename): """ Run IDS across the supplied file. :param infile: File like object containing pcap data. :param filename: Filename of the submitted file. :returns: Dictionary of analysis results. """
tmp = tempfile.NamedTemporaryFile(suffix=".pcap", delete=False) m = hashlib.md5() results = {'filename': filename, 'status': 'Failed', 'apiversion': __version__, } try: size = 0 while True: buf = infile.read(16384) if not buf: break tmp.write(buf) size += len(buf) m.update(buf) tmp.close() results['md5'] = m.hexdigest() results['filesize'] = size results.update(runner.run(tmp.name)) except OSError as ex: results['stderr'] = str(ex) finally: os.remove(tmp.name) return results
<SYSTEM_TASK:> Blocking POST handler for file submission. <END_TASK> <USER_TASK:> Description: def api_submit(): """ Blocking POST handler for file submission. Runs snort on supplied file and returns results as json text. """
data = request.files.file response.content_type = 'application/json' if not data or not hasattr(data, 'file'): return json.dumps({"status": "Failed", "stderr": "Missing form params"}) return json.dumps(analyse_pcap(data.file, data.filename), default=jsondate, indent=4)
<SYSTEM_TASK:> Simple test for pcap magic bytes in supplied file. <END_TASK> <USER_TASK:> Description: def is_pcap(pcap): """ Simple test for pcap magic bytes in supplied file. :param pcap: File path to Pcap file to check :returns: True if content is pcap (magic bytes present), otherwise False. """
with open(pcap, 'rb') as tmp: header = tmp.read(4) # check for both big/little endian if header == b"\xa1\xb2\xc3\xd4" or \ header == b"\xd4\xc3\xb2\xa1": return True return False
<SYSTEM_TASK:> Runs the specified IDS runner. <END_TASK> <USER_TASK:> Description: def _run_ids(runner, pcap): """ Runs the specified IDS runner. :param runner: Runner instance to use :param pcap: File path to pcap for analysis :returns: dict of run metadata/alerts """
run = {'name': runner.conf.get('name'), 'module': runner.conf.get('module'), 'ruleset': runner.conf.get('ruleset', 'default'), 'status': STATUS_FAILED, } try: run_start = datetime.now() version, alerts = runner.run(pcap) run['version'] = version or 'Unknown' run['status'] = STATUS_SUCCESS run['alerts'] = alerts except Exception as ex: run['error'] = str(ex) finally: run['duration'] = duration(run_start) return run
<SYSTEM_TASK:> Runs all configured IDS instances against the supplied pcap. <END_TASK> <USER_TASK:> Description: def run(pcap): """ Runs all configured IDS instances against the supplied pcap. :param pcap: File path to pcap file to analyse :returns: Dict with details and results of run/s """
start = datetime.now()
errors = []
status = STATUS_FAILED
analyses = []
pool = ThreadPool(MAX_THREADS)
try:
    if not is_pcap(pcap):
        raise Exception("Not a valid pcap file")
    runners = []
    for conf in Config().modules.values():
        runner = registry.get(conf['module'])
        if not runner:
            raise Exception("No module named: '{0}' found registered"
                            .format(conf['module']))
        runners.append(runner(conf))
    # launch via worker pool
    analyses = [pool.apply_async(_run_ids, (runner, pcap))
                for runner in runners]
    analyses = [x.get() for x in analyses]
    # were all runs successful?
    if all([x['status'] == STATUS_SUCCESS for x in analyses]):
        status = STATUS_SUCCESS
    # propagate any errors to the main list
    for run in [x for x in analyses if x['status'] != STATUS_SUCCESS]:
        errors.append("Failed to run {0}: {1}".format(run['name'],
                                                      run['error']))
except Exception as ex:
    errors.append(str(ex))
return {'start': start,
        'duration': duration(start),
        'status': status,
        'analyses': analyses,
        'errors': errors,
        }
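The fan-out/fan-in through `ThreadPool.apply_async` is the core of this function; a toy version with a stub in place of `_run_ids` (the stub and the literal 'Success' status are assumptions for the demo, the real module uses its STATUS_* constants):

from multiprocessing.pool import ThreadPool

def fake_ids(name, pcap):
    # Stand-in for _run_ids: returns a per-run dict of the same shape.
    return {'name': name, 'status': 'Success', 'alerts': []}

pool = ThreadPool(2)
async_results = [pool.apply_async(fake_ids, (name, 'sample.pcap'))
                 for name in ('snort', 'suricata')]
analyses = [r.get() for r in async_results]
print(all(run['status'] == 'Success' for run in analyses))  # True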
<SYSTEM_TASK:> Same as breadcrumb but label is not escaped. <END_TASK> <USER_TASK:> Description: def breadcrumb_safe(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is not escaped. """
append_breadcrumb(context, _(label), viewname, args, kwargs)
return ''
<SYSTEM_TASK:> Same as breadcrumb but label is not translated. <END_TASK> <USER_TASK:> Description: def breadcrumb_raw(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is not translated. """
append_breadcrumb(context, escape(label), viewname, args, kwargs)
return ''
<SYSTEM_TASK:> Same as breadcrumb but label is neither escaped nor translated. <END_TASK> <USER_TASK:> Description: def breadcrumb_raw_safe(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is neither escaped nor translated. """
append_breadcrumb(context, label, viewname, args, kwargs)
return ''
<SYSTEM_TASK:> Find the symbol of the specified name inside the module or raise an <END_TASK> <USER_TASK:> Description: def _find_symbol(self, module, name, fallback=None): """ Find the symbol of the specified name inside the module or raise an exception. """
if not hasattr(module, name) and fallback:
    return self._find_symbol(module, fallback, None)
return getattr(module, name)
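Rewritten as a free function purely for illustration, the fallback behaviour looks like this, using the standard `math` module as the target:

import math

def find_symbol(module, name, fallback=None):
    # Same lookup logic as the method above, minus the class.
    if not hasattr(module, name) and fallback:
        return find_symbol(module, fallback, None)
    return getattr(module, name)

print(find_symbol(math, 'no_such_function', fallback='sqrt'))  # <built-in function sqrt>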
<SYSTEM_TASK:> Store the incoming activation, apply the activation function and store <END_TASK> <USER_TASK:> Description: def apply(self, incoming): """ Store the incoming activation, apply the activation function and store the result as outgoing activation. """
assert len(incoming) == self.size
self.incoming = incoming
outgoing = self.activation(self.incoming)
assert len(outgoing) == self.size
self.outgoing = outgoing
<SYSTEM_TASK:> The derivative of the activation function at the current state. <END_TASK> <USER_TASK:> Description: def delta(self, above): """ The derivative of the activation function at the current state. """
return self.activation.delta(self.incoming, self.outgoing, above)
<SYSTEM_TASK:> Evaluate the network with alternative weights on the input data and <END_TASK> <USER_TASK:> Description: def feed(self, weights, data): """ Evaluate the network with alternative weights on the input data and return the output activation. """
assert len(data) == self.layers[0].size
self.layers[0].apply(data)
# Propagate through the remaining layers.
connections = zip(self.layers[:-1], weights, self.layers[1:])
for previous, weight, current in connections:
    incoming = self.forward(weight, previous.outgoing)
    current.apply(incoming)
# Return the activations of the output layer.
return self.layers[-1].outgoing
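A compact numpy sketch of the same forward pass, assuming `self.forward` amounts to a plain matrix product (the real implementation may differ, for example around bias handling) and using the identity function as the activation:

import numpy as np

data = np.array([1.0, 2.0, 3.0])
weights = [np.random.normal(0, 0.1, (3, 4)),
           np.random.normal(0, 0.1, (4, 2))]
outgoing = data
for weight in weights:
    incoming = outgoing.dot(weight)  # assumed equivalent of self.forward(weight, outgoing)
    outgoing = incoming              # identity activation keeps the sketch short
print(outgoing.shape)  # (2,)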
<SYSTEM_TASK:> Define model and initialize weights. <END_TASK> <USER_TASK:> Description: def _init_network(self): """Define model and initialize weights."""
self.network = Network(self.problem.layers)
self.weights = Matrices(self.network.shapes)
if self.load:
    loaded = np.load(self.load)
    assert loaded.shape == self.weights.shape, (
        'weights to load must match problem definition')
    self.weights.flat = loaded
else:
    self.weights.flat = np.random.normal(
        self.problem.weight_mean, self.problem.weight_scale,
        len(self.weights.flat))
<SYSTEM_TASK:> Classes needed during training. <END_TASK> <USER_TASK:> Description: def _init_training(self): # pylint: disable=redefined-variable-type """Classes needed during training."""
if self.check:
    self.backprop = CheckedBackprop(self.network, self.problem.cost)
else:
    self.backprop = BatchBackprop(self.network, self.problem.cost)
self.momentum = Momentum()
self.decent = GradientDecent()
self.decay = WeightDecay()
self.tying = WeightTying(*self.problem.weight_tying)
self.weights = self.tying(self.weights)
<SYSTEM_TASK:> Given a loop over batches of an iterable and an operation that should <END_TASK> <USER_TASK:> Description: def _every(times, step_size, index): """ Given a loop over batches of an iterable and an operation that should be performed every few elements, determine whether the operation should be called for the current index. """
current = index * step_size
step = current // times * times
reached = current >= step
overshot = current >= step + step_size
return current and reached and not overshot
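Worked example: with `step_size=10` and `times=25`, the helper fires on the first batch index whose cumulative example count crosses each multiple of 25 (the function is restated here only so the snippet runs on its own):

def _every(times, step_size, index):
    current = index * step_size
    step = current // times * times
    reached = current >= step
    overshot = current >= step + step_size
    return current and reached and not overshot

# Batches of 10 examples, action wanted roughly every 25 examples.
print([i for i in range(1, 11) if _every(25, 10, i)])  # [3, 5, 8, 10]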
<SYSTEM_TASK:> Parse a single kraken-report entry and return a dictionary of taxa for its <END_TASK> <USER_TASK:> Description: def parse_tax_lvl(entry, tax_lvl_depth=[]): """ Parse a single kraken-report entry and return a dictionary of taxa for its named ranks. :type entry: dict :param entry: attributes of a single kraken-report row. :type tax_lvl_depth: list :param tax_lvl_depth: running record of taxon levels encountered in previous calls. """
# How deep in the hierarchy are we currently? Each two spaces of
# indentation is one level deeper. Also parse the scientific name at this
# level.
depth_and_name = re.match('^( *)(.*)', entry['sci_name'])
depth = len(depth_and_name.group(1))//2
name = depth_and_name.group(2)
# Remove the previous levels so we're one higher than the level of the new
# taxon. (This also works if we're just starting out or are going deeper.)
del tax_lvl_depth[depth:]
# Append the new taxon.
tax_lvl_depth.append((entry['rank'], name))
# Create a tax_lvl dict for the named ranks.
tax_lvl = {x[0]: x[1] for x in tax_lvl_depth if x[0] in ranks}
return(tax_lvl)
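Assuming `parse_tax_lvl` from above is in scope and `ranks` contains single-letter codes like the ones below (an assumption about the module's constants), three successively indented entries build up the rank dictionary like so:

ranks = ["D", "P", "C", "O", "F", "G", "S"]  # assumed rank codes for the demo

entries = [
    {'rank': 'D', 'sci_name': 'Bacteria'},
    {'rank': 'P', 'sci_name': '  Proteobacteria'},
    {'rank': 'C', 'sci_name': '    Gammaproteobacteria'},
]
state = []  # running record of taxon levels, passed explicitly for clarity
for entry in entries:
    print(parse_tax_lvl(entry, state))
# {'D': 'Bacteria'}
# {'D': 'Bacteria', 'P': 'Proteobacteria'}
# {'D': 'Bacteria', 'P': 'Proteobacteria', 'C': 'Gammaproteobacteria'}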
<SYSTEM_TASK:> Parse a single output file from the kraken-report tool. Return a list <END_TASK> <USER_TASK:> Description: def parse_kraken_report(kdata, max_rank, min_rank): """ Parse a single output file from the kraken-report tool. Return a list of counts at each of the acceptable taxonomic levels, and a list of NCBI IDs and a formatted string representing their taxonomic hierarchies. :type kdata: str :param kdata: Contents of the kraken report file. """
# map between NCBI taxonomy IDs and the string rep. of the hierarchy
taxa = OrderedDict()
# the master collection of read counts (keyed on NCBI ID)
counts = OrderedDict()
# current rank
r = 0
max_rank_idx = ranks.index(max_rank)
min_rank_idx = ranks.index(min_rank)
for entry in kdata:
    erank = entry['rank'].strip()
    # print("erank: "+erank)
    if erank in ranks:
        r = ranks.index(erank)
    # update running tally of ranks
    tax_lvl = parse_tax_lvl(entry)
    # record the reads assigned to this taxon level, and record the
    # taxonomy string with the NCBI ID
    if erank in ranks and min_rank_idx >= ranks.index(entry['rank']) >= max_rank_idx:
        taxon_reads = int(entry["taxon_reads"])
        clade_reads = int(entry["clade_reads"])
        if taxon_reads > 0 or (clade_reads > 0 and entry['rank'] == min_rank):
            taxa[entry['ncbi_tax']] = tax_fmt(tax_lvl, r)
            if entry['rank'] == min_rank:
                counts[entry['ncbi_tax']] = clade_reads
            else:
                counts[entry['ncbi_tax']] = taxon_reads
            # print(" Counting {} reads at {}".format(counts[entry['ncbi_tax']], '; '.join(taxa[entry['ncbi_tax']])))
    # TODO: handle subspecies
    # if erank == '-' and min_rank == "SS" and last_entry_indent < curr_indent:
    #     pass
return counts, taxa
<SYSTEM_TASK:> Parse all kraken-report data files into sample counts dict <END_TASK> <USER_TASK:> Description: def process_samples(kraken_reports_fp, max_rank, min_rank): """ Parse all kraken-report data files into sample counts dict and store global taxon id -> taxonomy data """
taxa = OrderedDict()
sample_counts = OrderedDict()
for krep_fp in kraken_reports_fp:
    if not osp.isfile(krep_fp):
        raise RuntimeError("ERROR: File '{}' not found.".format(krep_fp))
    # use the kraken report filename as the sample ID
    sample_id = osp.splitext(osp.split(krep_fp)[1])[0]
    with open(krep_fp, "rt") as kf:
        try:
            kdr = csv.DictReader(kf, fieldnames=field_names, delimiter="\t")
            kdata = [entry for entry in kdr][1:]
        except OSError as oe:
            raise RuntimeError("ERROR: {}".format(oe))
    scounts, staxa = parse_kraken_report(kdata, max_rank=max_rank,
                                         min_rank=min_rank)
    # update master records
    taxa.update(staxa)
    sample_counts[sample_id] = scounts
return sample_counts, taxa
<SYSTEM_TASK:> Create a BIOM table from sample counts and taxonomy metadata. <END_TASK> <USER_TASK:> Description: def create_biom_table(sample_counts, taxa): """ Create a BIOM table from sample counts and taxonomy metadata. :type sample_counts: dict :param sample_counts: A dictionary of dictionaries with the first level keyed on sample ID, and the second level keyed on taxon ID with counts as values. :type taxa: dict :param taxa: A mapping between the taxon IDs from sample_counts to the full representation of the taxonomy string. The values in this dict will be used as metadata in the BIOM table. :rtype: biom.Table :return: A BIOM table containing the per-sample taxon counts and full taxonomy identifiers as metadata for each taxon. """
data = [[0 if taxid not in sample_counts[sid] else sample_counts[sid][taxid]
         for sid in sample_counts]
        for taxid in taxa]
data = np.array(data, dtype=int)
tax_meta = [{'taxonomy': taxa[taxid]} for taxid in taxa]
gen_str = "kraken-biom v{} ({})".format(__version__, __url__)
return Table(data, list(taxa), list(sample_counts), tax_meta,
             type="OTU table", create_date=str(dt.now().isoformat()),
             generated_by=gen_str, input_is_dense=True)
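How the dense matrix comes out for a toy input; the sample IDs, taxon IDs, and taxonomy strings below are made up for illustration:

from collections import OrderedDict
import numpy as np

sample_counts = OrderedDict([
    ('sampleA', {'562': 10, '1280': 3}),
    ('sampleB', {'562': 7}),
])
taxa = OrderedDict([('562', ['k__Bacteria', 'g__Escherichia']),
                    ('1280', ['k__Bacteria', 'g__Staphylococcus'])])

# Same row-per-taxon / column-per-sample layout that create_biom_table builds.
data = [[0 if taxid not in sample_counts[sid] else sample_counts[sid][taxid]
         for sid in sample_counts]
        for taxid in taxa]
print(np.array(data, dtype=int))
# [[10  7]
#  [ 3  0]]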
<SYSTEM_TASK:> Write the BIOM table to a file. <END_TASK> <USER_TASK:> Description: def write_biom(biomT, output_fp, fmt="hdf5", gzip=False): """ Write the BIOM table to a file. :type biomT: biom.table.Table :param biomT: A BIOM table containing the per-sample OTU counts and metadata to be written out to file. :type output_fp str :param output_fp: Path to the BIOM-format file that will be written. :type fmt: str :param fmt: One of: hdf5, json, tsv. The BIOM version the table will be output (2.x, 1.0, 'classic'). """
opener = open
mode = 'w'
if gzip and fmt != "hdf5":
    if not output_fp.endswith(".gz"):
        output_fp += ".gz"
    opener = gzip_open
    mode = 'wt'
# HDF5 BIOM files are gzipped by default
if fmt == "hdf5":
    opener = h5py.File
with opener(output_fp, mode) as biom_f:
    if fmt == "json":
        biomT.to_json(biomT.generated_by, direct_io=biom_f)
    elif fmt == "tsv":
        biom_f.write(biomT.to_tsv())
    else:
        biomT.to_hdf5(biom_f, biomT.generated_by)
return output_fp
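The opener/mode/extension selection is the easy part to get wrong; isolated as a standalone helper for the demo (the h5py branch is omitted, and the helper name is an invention, not part of the module), it behaves like this:

from gzip import open as gzip_open

def pick_opener(output_fp, fmt, gzip):
    # Mirrors write_biom's opener/mode/extension selection, minus HDF5.
    opener, mode = open, 'w'
    if gzip and fmt != "hdf5":
        if not output_fp.endswith(".gz"):
            output_fp += ".gz"
        opener, mode = gzip_open, 'wt'
    return opener, mode, output_fp

print(pick_opener("table.biom", "json", gzip=True))
# (<gzip open function>, 'wt', 'table.biom.gz')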
<SYSTEM_TASK:> Write out a file containing only the list of OTU IDs from the kraken data. <END_TASK> <USER_TASK:> Description: def write_otu_file(otu_ids, fp): """ Write out a file containing only the list of OTU IDs from the kraken data. One line per ID. :type otu_ids: list or iterable :param otu_ids: The OTU identifiers that will be written to file. :type fp: str :param fp: The path to the output file. """
fpdir = osp.split(fp)[0]
if not fpdir == "" and not osp.isdir(fpdir):
    raise RuntimeError("Specified path does not exist: {}".format(fpdir))
with open(fp, 'wt') as outf:
    outf.write('\n'.join(otu_ids))
<SYSTEM_TASK:> Performs predictions blending using the trained weights. <END_TASK> <USER_TASK:> Description: def transform(self, X): """Performs predictions blending using the trained weights. Args: X (array-like): Predictions of different models. Returns: dict with blended predictions (key is 'y_pred'). """
assert np.shape(X)[0] == len(self._weights), (
    'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '
    'n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights)))
blended_predictions = np.average(np.power(X, self._power),
                                 weights=self._weights,
                                 axis=0) ** (1.0 / self._power)
return {'y_pred': blended_predictions}
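A toy blend of two prediction vectors mirroring the transform above; with `power=1` the power mean reduces to a plain weighted average (the weights and values are chosen only for the demo):

import numpy as np

X = np.array([[0.2, 0.8, 0.5],   # predictions of model 1
              [0.4, 0.6, 0.7]])  # predictions of model 2
weights, power = [0.7, 0.3], 1.0
blended = np.average(np.power(X, power), weights=weights, axis=0) ** (1.0 / power)
print(blended)  # [0.26 0.74 0.56]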