<SYSTEM_TASK:> Get all files staged for the current commit. <END_TASK> <USER_TASK:> Description:
def get_staged_files():
    """Get all files staged for the current commit."""
    proc = subprocess.Popen(('git', 'status', '--porcelain'),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    staged_files = modified_re.findall(out)
    return staged_files
<SYSTEM_TASK:> Called on new websocket connection. <END_TASK> <USER_TASK:> Description:
def open(self):
    """
    Called on new websocket connection.
    """
    sess_id = self._get_sess_id()
    if sess_id:
        self.application.pc.websockets[self._get_sess_id()] = self
        self.write_message(json.dumps({"cmd": "status", "status": "open"}))
    else:
        self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
<SYSTEM_TASK:> Called on new websocket message. <END_TASK> <USER_TASK:> Description:
def on_message(self, message):
    """
    Called on new websocket message.
    """
    log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
    self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)
<SYSTEM_TASK:> Prepare links of form by mimicking pyoko's get_links method's result <END_TASK> <USER_TASK:> Description:
def get_links(self, **kw):
    """
    Prepare links of form by mimicking pyoko's get_links method's result

    Args:
        **kw:

    Returns: list of link dicts
    """
    links = [a for a in dir(self)
             if isinstance(getattr(self, a), Model) and not a.startswith('_model')]
    return [{'field': l,
             'mdl': getattr(self, l).__class__,
             } for l in links]
<SYSTEM_TASK:> Fills form with data <END_TASK> <USER_TASK:> Description:
def set_data(self, data):
    """
    Fills form with data

    Args:
        data (dict): Data to assign form fields.

    Returns:
        Self. Form object.
    """
    for name in self._fields:
        setattr(self, name, data.get(name))
    return self
<SYSTEM_TASK:> Get input and process accordingly. <END_TASK> <USER_TASK:> Description:
def get_input(source, files, threads=4, readtype="1D",
              combine="simple", names=None, barcoded=False):
    """Get input and process accordingly.

    Data can be:
    - an uncompressed, bgzip, bzip2 or gzip compressed fastq file
    - an uncompressed, bgzip, bzip2 or gzip compressed fasta file
    - a rich fastq containing additional key=value information in the description,
      as produced by MinKNOW and albacore, with the same compression options as above
    - a sorted bam file
    - a sorted cram file
    - a (compressed) sequencing_summary.txt file generated by albacore

    Handle is passed to the proper functions to get a DataFrame with metrics.
    Multiple files of the same type can be used to extract info from,
    which is done in parallel.

    Arguments:
    - source: defines the input data type and the function that needs to be called
    - files: is a list of one or more files to operate on, of the type of <source>
    - threads: is the number of workers which can be used
    - readtype: (only relevant for summary input) specifies which columns have to be extracted
    - combine: is either 'simple' or 'track', with the difference that with 'track' an
      additional field is created with the name of the dataset
    - names: if combine="track", the names to be used for the datasets. Needs to have the
      same length as files, or None
    """
    proc_functions = {
        'fastq': ex.process_fastq_plain,
        'fasta': ex.process_fasta,
        'bam': ex.process_bam,
        'summary': ex.process_summary,
        'fastq_rich': ex.process_fastq_rich,
        'fastq_minimal': ex.process_fastq_minimal,
        'cram': ex.process_cram,
        'ubam': ex.process_ubam,
    }
    filethreads = min(len(files), threads)
    threadsleft = threads - filethreads
    with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
        extraction_function = partial(proc_functions[source],
                                      threads=threadsleft,
                                      readtype=readtype,
                                      barcoded=barcoded)
        datadf = combine_dfs(
            dfs=[out for out in executor.map(extraction_function, files)],
            names=names or files,
            method=combine)
    if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
        datadf.drop("readIDs", axis='columns', inplace=True)
    datadf = calculate_start_time(datadf)
    logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
    if len(datadf) == 0:
        logging.critical("Nanoget: no reads retrieved.")
        sys.exit("Fatal: No reads found in input.")
    else:
        return datadf
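A usage sketch (the bam file names below are hypothetical):

    df = get_input(source="bam",
                   files=["run1.bam", "run2.bam"],
                   threads=8,
                   combine="track",
                   names=["run1", "run2"])  # 'track' adds a 'dataset' column per input file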
<SYSTEM_TASK:> Combine dataframes. <END_TASK> <USER_TASK:> Description:
def combine_dfs(dfs, names, method):
    """Combine dataframes.

    Combination is either done simply by concatenating the DataFrames
    or performs tracking by adding the name of the dataset as a column."""
    if method == "track":
        res = list()
        for df, identifier in zip(dfs, names):
            df["dataset"] = identifier
            res.append(df)
        return pd.concat(res, ignore_index=True)
    elif method == "simple":
        return pd.concat(dfs, ignore_index=True)
<SYSTEM_TASK:> Calculate the start_time per read. <END_TASK> <USER_TASK:> Description:
def calculate_start_time(df):
    """Calculate the start_time per read.

    Time data is either a "time" (in seconds, derived from summary files) or a
    "timestamp" (in UTC, derived from fastq_rich format) and has to be converted
    appropriately to a datetime format in time_arr.

    For both, time_zero is the minimal value of time_arr, which is then
    subtracted from all other times.

    In the case of method=track (and dataset is a column in the df), this
    subtraction is done per dataset.
    """
    if "time" in df:
        df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
    elif "timestamp" in df:
        df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
    else:
        return df
    if "dataset" in df:
        for dset in df["dataset"].unique():
            time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
            df.loc[df["dataset"] == dset, "start_time"] = \
                df.loc[df["dataset"] == dset, "time_arr"] - time_zero
    else:
        df["start_time"] = df["time_arr"] - df["time_arr"].min()
    return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
<SYSTEM_TASK:> Check that we don't have unknown keys in a dictionary. <END_TASK> <USER_TASK:> Description:
def check_dict_expected_keys(self, expected_keys, current, dict_name):
    """
    Check that we don't have unknown keys in a dictionary.

    It does not raise an error if we have fewer keys than expected.
    """
    if not isinstance(current, dict):
        raise ParseError(u"'{}' key must be a dict".format(dict_name), YAML_EXAMPLE)

    expected_keys = set(expected_keys)
    current_keys = {key for key in current}
    extra_keys = current_keys - expected_keys

    if extra_keys:
        message = u"{}: the keys {} are unexpected. (allowed keys: {})"
        raise ParseError(
            message.format(
                dict_name,
                list(extra_keys),
                list(expected_keys),
            ),
            YAML_EXAMPLE,
        )
<SYSTEM_TASK:> Sets user notification message. <END_TASK> <USER_TASK:> Description:
def set_message(self, title, msg, typ, url=None):
    """
    Sets user notification message.

    Args:
        title: Msg. title
        msg: Msg. text
        typ: Msg. type
        url: Additional URL (if exists)

    Returns:
        Message ID.
    """
    return self.user.send_notification(title=title, message=msg, typ=typ, url=url)
<SYSTEM_TASK:> A property that indicates if current user is logged in or not. <END_TASK> <USER_TASK:> Description:
def is_auth(self):
    """
    A property that indicates if current user is logged in or not.

    Returns:
        Boolean.
    """
    if self.user_id is None:
        self.user_id = self.session.get('user_id')
    return bool(self.user_id)
<SYSTEM_TASK:> Create a message box <END_TASK> <USER_TASK:> Description:
def msg_box(self, msg, title=None, typ='info'):
    """
    Create a message box

    :param str msg:
    :param str title:
    :param str typ: 'info', 'error', 'warning'
    """
    self.output['msgbox'] = {'type': typ, "title": title or msg[:20], "msg": msg}
<SYSTEM_TASK:> Assigns current task step to self.task <END_TASK> <USER_TASK:> Description:
def _update_task(self, task):
    """
    Assigns current task step to self.task,
    then updates the task's data with self.task_data.

    Args:
        task: Task object.
    """
    self.task = task
    self.task.data.update(self.task_data)
    self.task_type = task.task_spec.__class__.__name__
    self.spec = task.task_spec
    self.task_name = task.get_name()
    self.activity = getattr(self.spec, 'service_class', '')
    self._set_lane_data()
<SYSTEM_TASK:> This method is automatically called on each request and <END_TASK> <USER_TASK:> Description:
def set_client_cmds(self):
    """
    This method is automatically called on each request and
    updates the "object_id", "cmd" and "flow" client variables
    from current.input.

    "flow" and "object_id" variables will always exist in the task_data,
    so app developers can safely check for their values in workflows.
    Their values will be reset to None if they do not exist in the
    current input data set.

    On the other hand, if there isn't a "cmd" in the current.input,
    cmd will be removed from task_data.
    """
    self.task_data['cmd'] = self.input.get('cmd')
    self.task_data['flow'] = self.input.get('flow')
    filters = self.input.get('filters', {})
    try:
        if isinstance(filters, dict):
            # this is the new form, others will be removed when ui is ready
            self.task_data['object_id'] = filters.get('object_id')['values'][0]
        elif filters[0]['field'] == 'object_id':
            self.task_data['object_id'] = filters[0]['values'][0]
    except:
        if 'object_id' in self.input:
            self.task_data['object_id'] = self.input.get('object_id')
<SYSTEM_TASK:> Returns valid and legal move given position <END_TASK> <USER_TASK:> Description:
def generate_move(self, position):
    """
    Returns valid and legal move given position

    :type: position: Board
    :rtype: Move
    """
    while True:
        print(position)
        raw = input(str(self.color) + "\'s move \n")
        move = converter.short_alg(raw, self.color, position)
        if move is None:
            continue
        return move
<SYSTEM_TASK:> Finds if playing my move would make both kings meet. <END_TASK> <USER_TASK:> Description:
def in_check_as_result(self, pos, move):
    """
    Finds if playing my move would make both kings meet.

    :type: pos: Board
    :type: move: Move
    :rtype: bool
    """
    test = cp(pos)
    test.update(move)
    test_king = test.get_king(move.color)
    return self.loc_adjacent_to_opponent_king(test_king.location, test)
<SYSTEM_TASK:> Finds if 2 kings are touching given the position of one of the kings. <END_TASK> <USER_TASK:> Description:
def loc_adjacent_to_opponent_king(self, location, position):
    """
    Finds if 2 kings are touching given the position of one of the kings.

    :type: location: Location
    :type: position: Board
    :rtype: bool
    """
    for fn in self.cardinal_directions:
        try:
            if isinstance(position.piece_at_square(fn(location)), King) and \
                    position.piece_at_square(fn(location)).color != self.color:
                return True
        except IndexError:
            pass
    return False
<SYSTEM_TASK:> Adds all 8 cardinal directions as moves for the King if legal. <END_TASK> <USER_TASK:> Description:
def add(self, func, position):
    """
    Adds all 8 cardinal directions as moves for the King if legal.

    :type: func: function
    :type: position: Board
    :rtype: gen
    """
    try:
        if self.loc_adjacent_to_opponent_king(func(self.location), position):
            return
    except IndexError:
        return

    if position.is_square_empty(func(self.location)):
        yield self.create_move(func(self.location), notation_const.MOVEMENT)
    elif position.piece_at_square(func(self.location)).color != self.color:
        yield self.create_move(func(self.location), notation_const.CAPTURE)
<SYSTEM_TASK:> Decides if given rook exists, is of this color, and has not moved so it <END_TASK> <USER_TASK:> Description:
def _rook_legal_for_castle(self, rook):
    """
    Decides if given rook exists, is of this color, and has not moved
    so it is eligible to castle.

    :type: rook: Rook
    :rtype: bool
    """
    return rook is not None and \
        type(rook) is Rook and \
        rook.color == self.color and \
        not rook.has_moved
<SYSTEM_TASK:> Checks if set of squares in between ``King`` and ``Rook`` are empty and safe <END_TASK> <USER_TASK:> Description:
def _empty_not_in_check(self, position, direction):
    """
    Checks if set of squares in between ``King`` and ``Rook`` are empty and safe
    for the king to castle.

    :type: position: Position
    :type: direction: function
    :rtype: bool
    """
    def valid_square(square):
        return position.is_square_empty(square) and \
            not self.in_check(position, square)

    return valid_square(direction(self.location, 1)) and \
        valid_square(direction(self.location, 2))
<SYSTEM_TASK:> Adds kingside and queenside castling moves if legal <END_TASK> <USER_TASK:> Description:
def add_castle(self, position):
    """
    Adds kingside and queenside castling moves if legal

    :type: position: Board
    """
    if self.has_moved or self.in_check(position):
        return

    if self.color == color.white:
        rook_rank = 0
    else:
        rook_rank = 7

    castle_type = {
        notation_const.KING_SIDE_CASTLE: {
            "rook_file": 7,
            "direction": lambda king_square, times: king_square.shift_right(times)
        },
        notation_const.QUEEN_SIDE_CASTLE: {
            "rook_file": 0,
            "direction": lambda king_square, times: king_square.shift_left(times)
        }
    }
    for castle_key in castle_type:
        castle_dict = castle_type[castle_key]
        castle_rook = position.piece_at_square(Location(rook_rank, castle_dict["rook_file"]))
        if self._rook_legal_for_castle(castle_rook) and \
                self._empty_not_in_check(position, castle_dict["direction"]):
            yield self.create_move(castle_dict["direction"](self.location, 2), castle_key)
<SYSTEM_TASK:> Generates list of possible moves <END_TASK> <USER_TASK:> Description:
def possible_moves(self, position):
    """
    Generates list of possible moves

    :type: position: Board
    :rtype: list
    """
    # Chain used to combine multiple generators
    for move in itertools.chain(*[self.add(fn, position) for fn in self.cardinal_directions]):
        yield move

    for move in self.add_castle(position):
        yield move
<SYSTEM_TASK:> Finds if the king is in check or if both kings are touching. <END_TASK> <USER_TASK:> Description:
def in_check(self, position, location=None):
    """
    Finds if the king is in check or if both kings are touching.

    :type: position: Board
    :return: bool
    """
    location = location or self.location
    for piece in position:
        if piece is not None and piece.color != self.color:
            if not isinstance(piece, King):
                for move in piece.possible_moves(position):
                    if move.end_loc == location:
                        return True
            else:
                if self.loc_adjacent_to_opponent_king(piece.location, position):
                    return True
    return False
<SYSTEM_TASK:> Sets the keep-alive setting for the peer socket. <END_TASK> <USER_TASK:> Description:
def set_keep_alive(sock, idle=10, interval=5, fails=5):
    """Sets the keep-alive setting for the peer socket.

    :param sock: Socket to be configured.
    :param idle: Interval in seconds after which, for an idle connection,
        a keep-alive probe starts being sent.
    :param interval: Interval in seconds between probes.
    :param fails: Maximum number of failed probes.
    """
    import sys

    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if sys.platform in ('linux', 'linux2'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, fails)
    elif sys.platform == 'darwin':
        # 0x10 is TCP_KEEPALIVE on macOS, which sets the probe interval.
        sock.setsockopt(socket.IPPROTO_TCP, 0x10, interval)
    else:
        # Do nothing precise for unsupported platforms.
        pass
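A usage sketch, assuming a connected TCP socket:

    import socket

    sock = socket.create_connection(("example.com", 80))
    set_keep_alive(sock, idle=30, interval=10, fails=3)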
<SYSTEM_TASK:> Creates a ``Board`` with the standard chess starting position. <END_TASK> <USER_TASK:> Description:
def init_default(cls):
    """
    Creates a ``Board`` with the standard chess starting position.

    :rtype: Board
    """
    return cls([
        # First rank
        [Rook(white, Location(0, 0)), Knight(white, Location(0, 1)),
         Bishop(white, Location(0, 2)), Queen(white, Location(0, 3)),
         King(white, Location(0, 4)), Bishop(white, Location(0, 5)),
         Knight(white, Location(0, 6)), Rook(white, Location(0, 7))],

        # Second rank
        [Pawn(white, Location(1, file)) for file in range(8)],

        # Third rank
        [None for _ in range(8)],

        # Fourth rank
        [None for _ in range(8)],

        # Fifth rank
        [None for _ in range(8)],

        # Sixth rank
        [None for _ in range(8)],

        # Seventh rank
        [Pawn(black, Location(6, file)) for file in range(8)],

        # Eighth rank
        [Rook(black, Location(7, 0)), Knight(black, Location(7, 1)),
         Bishop(black, Location(7, 2)), Queen(black, Location(7, 3)),
         King(black, Location(7, 4)), Bishop(black, Location(7, 5)),
         Knight(black, Location(7, 6)), Rook(black, Location(7, 7))]
    ])
<SYSTEM_TASK:> Finds the advantage a particular side possesses given a value scheme. <END_TASK> <USER_TASK:> Description:
def material_advantage(self, input_color, val_scheme):
    """
    Finds the advantage a particular side possesses given a value scheme.

    :type: input_color: Color
    :type: val_scheme: PieceValues
    :rtype: double
    """
    if self.get_king(input_color).in_check(self) and self.no_moves(input_color):
        return -100

    if self.get_king(-input_color).in_check(self) and self.no_moves(-input_color):
        return 100

    return sum([val_scheme.val(piece, input_color) for piece in self])
<SYSTEM_TASK:> Calculates advantage after move is played <END_TASK> <USER_TASK:> Description:
def advantage_as_result(self, move, val_scheme):
    """
    Calculates advantage after move is played

    :type: move: Move
    :type: val_scheme: PieceValues
    :rtype: double
    """
    test_board = cp(self)
    test_board.update(move)
    return test_board.material_advantage(move.color, val_scheme)
<SYSTEM_TASK:> Returns list of all possible moves <END_TASK> <USER_TASK:> Description:
def _calc_all_possible_moves(self, input_color):
    """
    Returns list of all possible moves

    :type: input_color: Color
    :rtype: list
    """
    for piece in self:
        # Tests if square on the board is not empty
        if piece is not None and piece.color == input_color:
            for move in piece.possible_moves(self):
                test = cp(self)
                test_move = Move(end_loc=move.end_loc,
                                 piece=test.piece_at_square(move.start_loc),
                                 status=move.status,
                                 start_loc=move.start_loc,
                                 promoted_to_piece=move.promoted_to_piece)
                test.update(test_move)

                if self.king_loc_dict is None:
                    yield move
                    continue

                my_king = test.piece_at_square(self.king_loc_dict[input_color])

                if my_king is None or \
                        not isinstance(my_king, King) or \
                        my_king.color != input_color:
                    self.king_loc_dict[input_color] = test.find_king(input_color)
                    my_king = test.piece_at_square(self.king_loc_dict[input_color])

                if not my_king.in_check(test):
                    yield move
<SYSTEM_TASK:> Finds Location of the first piece that matches piece. <END_TASK> <USER_TASK:> Description:
def find_piece(self, piece):
    """
    Finds Location of the first piece that matches piece.
    If none is found, Exception is raised.

    :type: piece: Piece
    :rtype: Location
    """
    for i, _ in enumerate(self.position):
        for j, _ in enumerate(self.position):
            loc = Location(i, j)
            if not self.is_square_empty(loc) and \
                    self.piece_at_square(loc) == piece:
                return loc
    raise ValueError("{} \nPiece not found: {}".format(self, piece))
<SYSTEM_TASK:> Places piece at given location <END_TASK> <USER_TASK:> Description:
def place_piece_at_square(self, piece, location):
    """
    Places piece at given location

    :type: piece: Piece
    :type: location: Location
    """
    self.position[location.rank][location.file] = piece
    piece.location = location
<SYSTEM_TASK:> Moves piece from one location to another <END_TASK> <USER_TASK:> Description:
def move_piece(self, initial, final):
    """
    Moves piece from one location to another

    :type: initial: Location
    :type: final: Location
    """
    self.place_piece_at_square(self.piece_at_square(initial), final)
    self.remove_piece_at_square(initial)
<SYSTEM_TASK:> Updates position by applying selected move <END_TASK> <USER_TASK:> Description:
def update(self, move):
    """
    Updates position by applying selected move

    :type: move: Move
    """
    if move is None:
        raise TypeError("Move cannot be type None")

    if self.king_loc_dict is not None and isinstance(move.piece, King):
        self.king_loc_dict[move.color] = move.end_loc

    # Invalidates en-passant
    for square in self:
        pawn = square
        if isinstance(pawn, Pawn):
            pawn.just_moved_two_steps = False

    # Sets King and Rook has_moved property to True if the piece has moved
    if type(move.piece) is King or type(move.piece) is Rook:
        move.piece.has_moved = True
    elif move.status == notation_const.MOVEMENT and \
            isinstance(move.piece, Pawn) and \
            fabs(move.end_loc.rank - move.start_loc.rank) == 2:
        move.piece.just_moved_two_steps = True

    if move.status == notation_const.KING_SIDE_CASTLE:
        self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
        self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
    elif move.status == notation_const.QUEEN_SIDE_CASTLE:
        self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
        self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
    elif move.status == notation_const.EN_PASSANT:
        self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
    elif move.status == notation_const.PROMOTE or \
            move.status == notation_const.CAPTURE_AND_PROMOTE:
        try:
            self.remove_piece_at_square(move.start_loc)
            self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc),
                                       move.end_loc)
        except TypeError as e:
            raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
        return

    self.move_piece(move.piece.location, move.end_loc)
<SYSTEM_TASK:> get atom paths from detached atom to attached <END_TASK> <USER_TASK:> Description:
def __get_substitution_paths(g):
    """
    get atom paths from detached atom to attached

    :param g: CGRContainer
    :return: tuple of atoms numbers
    """
    for n, nbrdict in g.adjacency():
        for m, l in combinations(nbrdict, 2):
            nms = nbrdict[m]['sp_bond']
            nls = nbrdict[l]['sp_bond']
            if nms == (1, None) and nls == (None, 1):
                yield m, n, l
            elif nms == (None, 1) and nls == (1, None):
                yield l, n, m
<SYSTEM_TASK:> Creates a menu entry for given model data. <END_TASK> <USER_TASK:> Description:
def _add_crud(self, model_data, object_type, results):
    """
    Creates a menu entry for given model data.
    Updates results in place.

    Args:
        model_data: Model data.
        object_type: Relation name.
        results: Results dict.
    """
    model = model_registry.get_model(model_data['name'])
    field_name = model_data.get('field')
    verbose_name = model_data.get('verbose_name', model.Meta.verbose_name_plural)
    category = model_data.get('category', settings.DEFAULT_OBJECT_CATEGORY_NAME)
    wf_dict = {"text": verbose_name,
               "wf": model_data.get('wf', "crud"),
               "model": model_data['name'],
               "kategori": category}
    if field_name:
        wf_dict['param'] = field_name
    results[object_type].append(wf_dict)
    self._add_to_quick_menu(wf_dict['model'], wf_dict)
<SYSTEM_TASK:> Creates menu entries for custom workflows. <END_TASK> <USER_TASK:> Description:
def _get_workflow_menus(self):
    """
    Creates menu entries for custom workflows.

    Returns:
        Dict of list of dicts (``{'':[{}],}``). Menu entries.
    """
    results = defaultdict(list)
    from zengine.lib.cache import WFSpecNames

    for name, title, category in WFSpecNames().get_or_set():
        if self.current.has_permission(name) and category != 'hidden':
            wf_dict = {
                "text": title,
                "wf": name,
                "kategori": category,
                "param": "id"
            }
            results['other'].append(wf_dict)
            self._add_to_quick_menu(name, wf_dict)
    return results
<SYSTEM_TASK:> Creates connection to RabbitMQ server <END_TASK> <USER_TASK:> Description:
def connect(self):
    """
    Creates connection to RabbitMQ server
    """
    if self.connecting:
        log.info('PikaClient: Already connecting to RabbitMQ')
        return

    log.info('PikaClient: Connecting to RabbitMQ')
    self.connecting = True
    self.connection = TornadoConnection(NON_BLOCKING_MQ_PARAMS,
                                        stop_ioloop_on_close=False,
                                        custom_ioloop=self.io_loop,
                                        on_open_callback=self.on_connected)
<SYSTEM_TASK:> AMQP connection callback. <END_TASK> <USER_TASK:> Description:
def on_connected(self, connection):
    """
    AMQP connection callback.
    Creates input channel.

    Args:
        connection: AMQP connection
    """
    log.info('PikaClient: connected to RabbitMQ')
    self.connected = True
    self.in_channel = self.connection.channel(self.on_channel_open)
<SYSTEM_TASK:> Input channel creation callback <END_TASK> <USER_TASK:> Description:
def on_channel_open(self, channel):
    """
    Input channel creation callback.
    Queue declaration is done here.

    Args:
        channel: input channel
    """
    self.in_channel.exchange_declare(exchange='input_exc', type='topic', durable=True)
    channel.queue_declare(callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)
<SYSTEM_TASK:> close opened file <END_TASK> <USER_TASK:> Description:
def close(self, force=False):
    """
    close opened file

    :param force: force closing of externally opened file or buffer
    """
    if self.__write:
        self.write = self.__write_adhoc
        self.__write = False

    if not self._is_buffer or force:
        self._file.close()
<SYSTEM_TASK:> convert structure to aromatic form <END_TASK> <USER_TASK:> Description:
def aromatize(self):
    """
    convert structure to aromatic form

    :return: number of processed rings
    """
    rings = [x for x in self.sssr if 4 < len(x) < 7]
    if not rings:
        return 0
    total = 0
    while True:
        c = self._quinonize(rings, 'order')
        if c:
            total += c
        elif total:
            break
        c = self._aromatize(rings, 'order')
        if not c:
            break
        total += c
    if total:
        self.flush_cache()
    return total
<SYSTEM_TASK:> write close tag of MRV file and close opened file <END_TASK> <USER_TASK:> Description:
def close(self, *args, **kwargs):
    """
    write close tag of MRV file and close opened file

    :param force: force closing of externally opened file or buffer
    """
    if not self.__finalized:
        self._file.write('</cml>')
        self.__finalized = True
    super().close(*args, **kwargs)
<SYSTEM_TASK:> write single molecule or reaction into file <END_TASK> <USER_TASK:> Description:
def write(self, data):
    """
    write single molecule or reaction into file
    """
    self._file.write('<cml>')
    self.__write(data)
    self.write = self.__write
<SYSTEM_TASK:> List task invitations of current user <END_TASK> <USER_TASK:> Description:
def get_tasks(current):
    """
    List task invitations of current user

    .. code-block:: python

        #  request:
            {
                'view': '_zops_get_tasks',
                'state': string,   # one of these:
                                   # "active", "future", "finished", "expired"
                'inverted': boolean,  # search on other people's tasks
                'query': string,   # optional. for searching on user's tasks
                'wf_type': string,  # optional. only show tasks of selected wf_type
                'start_date': datetime,  # optional. only show tasks that start after this date
                'finish_date': datetime,  # optional. only show tasks that should end before this date
            }

        #  response:
            {
                'task_list': [
                    {
                        'token': key,  # wf token (key of WFInstance)
                        'key': key,  # key of TaskInvitation
                        'title': string,  # task title
                        'wf_type': string,  # name of workflow
                        'state': int,  # state of invitation
                                       # zengine.models.workflow_manager.TASK_STATES
                        'start_date': string,  # start date
                        'finish_date': string,  # end date
                    },],
                'task_count': {
                    'active': int,
                    'future': int,
                    'finished': int,
                    'expired': int,
                }
            }
    """
    # TODO: Also return invitations for user's other roles
    # TODO: Handle automatic role switching
    STATE_DICT = {
        'active': [20, 30],
        'future': 10,
        'finished': 40,
        'expired': 90
    }
    state = STATE_DICT[current.input['state']]
    if isinstance(state, list):
        queryset = TaskInvitation.objects.filter(progress__in=state)
    else:
        queryset = TaskInvitation.objects.filter(progress=state)

    if 'inverted' in current.input:
        # show other user's tasks
        allowed_workflows = [bpmn_wf.name for bpmn_wf in BPMNWorkflow.objects.all()
                             if current.has_permission(bpmn_wf.name)]
        queryset = queryset.exclude(role_id=current.role_id).filter(
            wf_name__in=allowed_workflows)
    else:
        # show current user's tasks
        queryset = queryset.filter(role_id=current.role_id)

    if 'query' in current.input:
        queryset = queryset.filter(search_data__contains=current.input['query'].lower())
    if 'wf_type' in current.input:
        queryset = queryset.filter(wf_name=current.input['wf_type'])
    if 'start_date' in current.input:
        queryset = queryset.filter(
            start_date__gte=datetime.strptime(current.input['start_date'], "%d.%m.%Y"))
    if 'finish_date' in current.input:
        queryset = queryset.filter(
            finish_date__lte=datetime.strptime(current.input['finish_date'], "%d.%m.%Y"))

    current.output['task_list'] = [
        {
            'token': inv.instance.key,
            'key': inv.key,
            'title': inv.title,
            'wf_type': inv.wf_name,
            'state': inv.progress,
            'start_date': format_date(inv.start_date),
            'finish_date': format_date(inv.finish_date),
            'description': inv.instance.wf.description,
            'status': inv.ownership,
        } for inv in queryset
    ]

    task_inv_list = TaskInvitation.objects.filter(role_id=current.role_id)
    current.output['task_count'] = {
        'active': task_inv_list.filter(progress__in=STATE_DICT['active']).count(),
        'future': task_inv_list.filter(progress=STATE_DICT['future']).count(),
        'finished': task_inv_list.filter(progress=STATE_DICT['finished']).count(),
        'expired': task_inv_list.filter(progress=STATE_DICT['expired']).count(),
    }
<SYSTEM_TASK:> reduce memory usage of the dataframe <END_TASK> <USER_TASK:> Description:
def reduce_memory_usage(df):
    """reduce memory usage of the dataframe

    - convert runIDs to categorical
    - downcast ints and floats
    """
    usage_pre = df.memory_usage(deep=True).sum()
    if "runIDs" in df:
        df.loc[:, "runIDs"] = df.loc[:, "runIDs"].astype("category")
    df_int = df.select_dtypes(include=['int'])
    df_float = df.select_dtypes(include=['float'])
    df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')
    df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')
    usage_post = df.memory_usage(deep=True).sum()
    logging.info("Reduced DataFrame memory usage from {}Mb to {}Mb".format(
        usage_pre / 1024**2, usage_post / 1024**2))
    if usage_post > 4e9 and "readIDs" in df:
        logging.info("DataFrame of features is too big, dropping read identifiers.")
        return df.drop(["readIDs"], axis=1, errors="ignore")
    else:
        return df
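A usage sketch (a toy DataFrame; real input would come from the extraction step):

    import pandas as pd

    df = pd.DataFrame({"lengths": [500, 1500, 800], "quals": [10.5, 12.1, 9.8]})
    df = reduce_memory_usage(df)
    print(df.dtypes)  # ints downcast to unsigned, floats to float32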
<SYSTEM_TASK:> Check if the file supplied as input exists. <END_TASK> <USER_TASK:> Description:
def check_existance(f):
    """Check if the file supplied as input exists."""
    if not opath.isfile(f):
        logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f))
        sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f))
<SYSTEM_TASK:> Lists user roles as selectable except user's current role. <END_TASK> <USER_TASK:> Description:
def list_user_roles(self):
    """
    Lists user roles as selectable except user's current role.
    """
    _form = JsonForm(current=self.current, title=_(u"Switch Role"))
    _form.help_text = "Your current role: %s %s" % (self.current.role.unit.name,
                                                    self.current.role.abstract_role.name)
    switch_roles = self.get_user_switchable_roles()
    _form.role_options = fields.Integer(_(u"Please, choose the role you want to switch:"),
                                        choices=switch_roles,
                                        default=switch_roles[0][0],
                                        required=True)
    _form.switch = fields.Button(_(u"Switch"))
    self.form_out(_form)
<SYSTEM_TASK:> Changes user's role from current role to chosen role. <END_TASK> <USER_TASK:> Description:
def change_user_role(self):
    """
    Changes user's role from current role to chosen role.
    """
    # Get chosen role_key from user form.
    role_key = self.input['form']['role_options']
    # Assign chosen switch role key to user's last_login_role_key field
    self.current.user.last_login_role_key = role_key
    self.current.user.save()
    auth = AuthBackend(self.current)
    # According to user's new role, user's session is set again.
    auth.set_user(self.current.user)
    # Dashboard is reloaded according to user's new role.
    self.current.output['cmd'] = 'reload'
<SYSTEM_TASK:> Serializer for consistency <END_TASK> <USER_TASK:> Description:
def json_dumps(self, obj):
    """Serializer for consistency"""
    return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
<SYSTEM_TASK:> Sanitize obj name into fname and verify it doesn't already exist <END_TASK> <USER_TASK:> Description:
def safe_filename(self, otype, oid):
    """Sanitize obj name into fname and verify it doesn't already exist"""
    permitted = set(['_', '-', '(', ')'])
    oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
    while oid.find('--') != -1:
        oid = oid.replace('--', '-')
    ext = 'json'
    ts = datetime.now().strftime("%Y%m%dT%H%M%S")
    fname = ''
    is_new = False
    while not is_new:
        oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
        fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
        is_new = True
        if os.path.exists(fname):
            is_new = False
            ts += '-bck'
    return fname
<SYSTEM_TASK:> Write a list of related objs to file <END_TASK> <USER_TASK:> Description:
def write_pkg_to_file(self, name, objects, path='.', filename=None):
    """Write a list of related objs to file"""
    # Kibana uses an array of docs, do the same
    # as opposed to a dict of docs
    pkg_objs = []
    for _, obj in iteritems(objects):
        pkg_objs.append(obj)
    sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
    output = self.json_dumps(sorted_pkg) + '\n'
    if filename is None:
        filename = self.safe_filename('Pkg', name)
    filename = os.path.join(path, filename)
    self.pr_inf("Writing to file: " + filename)
    with open(filename, 'w') as f:
        f.write(output)
    return filename
<SYSTEM_TASK:> Get DB and all objs needed to duplicate it <END_TASK> <USER_TASK:> Description:
def get_dashboard_full(self, db_name):
    """Get DB and all objs needed to duplicate it"""
    objects = {}
    dashboards = self.get_objects("type", "dashboard")
    vizs = self.get_objects("type", "visualization")
    searches = self.get_objects("type", "search")

    if db_name not in dashboards:
        return None
    self.pr_inf("Found dashboard: " + db_name)
    objects[db_name] = dashboards[db_name]

    panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
    for panel in panels:
        if 'id' not in panel:
            continue
        pid = panel['id']
        if pid in searches:
            self.pr_inf("Found search: " + pid)
            objects[pid] = searches[pid]
        elif pid in vizs:
            self.pr_inf("Found vis: " + pid)
            objects[pid] = vizs[pid]
            emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
            if emb is not None and emb not in objects:
                if emb not in searches:
                    self.pr_err('Missing search %s' % emb)
                    return objects
                objects[emb] = searches[emb]
    return objects
<SYSTEM_TASK:> Generates wf packages from workflow diagrams. <END_TASK> <USER_TASK:> Description:
def package_in_memory(cls, workflow_name, workflow_files):
    """
    Generates wf packages from workflow diagrams.

    Args:
        workflow_name: Name of wf
        workflow_files: Diagram file.

    Returns:
        Workflow package (file like) object
    """
    s = StringIO()
    p = cls(s, workflow_name, meta_data=[])
    p.add_bpmn_files_by_glob(workflow_files)
    p.create_package()
    return s.getvalue()
<SYSTEM_TASK:> condense reaction container to CGR. see init for details about cgr_type <END_TASK> <USER_TASK:> Description:
def compose(self, data):
    """
    condense reaction container to CGR. see init for details about cgr_type

    :param data: ReactionContainer
    :return: CGRContainer
    """
    g = self.__separate(data) if self.__cgr_type in (1, 2, 3, 4, 5, 6) else self.__condense(data)
    g.meta.update(data.meta)
    return g
<SYSTEM_TASK:> implementation of bond addition <END_TASK> <USER_TASK:> Description:
def add_bond(self, atom1, atom2, bond):
    """
    implementation of bond addition
    """
    if atom1 == atom2:
        raise KeyError('atom loops impossible')
    if atom1 not in self._node or atom2 not in self._node:
        raise KeyError('atoms not found')
    if atom1 in self._adj[atom2]:
        raise KeyError('atoms already bonded')

    attr_dict = self.edge_attr_dict_factory()
    if isinstance(bond, int):
        attr_dict.order = bond
    else:
        attr_dict.update(bond)

    self._adj[atom1][atom2] = self._adj[atom2][atom1] = attr_dict
    self.flush_cache()
<SYSTEM_TASK:> implementation of bond removing <END_TASK> <USER_TASK:> Description:
def delete_bond(self, n, m):
    """
    implementation of bond removing
    """
    self.remove_edge(n, m)
    self.flush_cache()
<SYSTEM_TASK:> create substructure containing atoms and their neighbors <END_TASK> <USER_TASK:> Description:
def augmented_substructure(self, atoms, dante=False, deep=1, meta=False, as_view=True):
    """
    create substructure containing atoms and their neighbors

    :param atoms: list of core atoms in graph
    :param dante: if True return list of graphs containing atoms, atoms + first circle,
        atoms + 1st + 2nd, etc up to deep or while new nodes available
    :param deep: number of bonds between atoms and neighbors
    :param meta: copy metadata to each substructure
    :param as_view: If True, the returned graph-view provides a read-only view of the
        original graph without actually copying any data
    """
    nodes = [set(atoms)]
    for i in range(deep):
        n = {y for x in nodes[-1] for y in self._adj[x]} | nodes[-1]
        if n in nodes:
            break
        nodes.append(n)
    if dante:
        return [self.substructure(a, meta, as_view) for a in nodes]
    else:
        return self.substructure(nodes[-1], meta, as_view)
<SYSTEM_TASK:> split disconnected structure to connected substructures <END_TASK> <USER_TASK:> Description:
def split(self, meta=False):
    """
    split disconnected structure to connected substructures

    :param meta: copy metadata to each substructure
    :return: list of substructures
    """
    return [self.substructure(c, meta, False) for c in connected_components(self)]
<SYSTEM_TASK:> iterate over all bonds <END_TASK> <USER_TASK:> Description:
def bonds(self):
    """
    iterate over all bonds
    """
    seen = set()
    for n, m_bond in self._adj.items():
        seen.add(n)
        for m, bond in m_bond.items():
            if m not in seen:
                yield n, m, bond
<SYSTEM_TASK:> needed to resolve cyclic imports <END_TASK> <USER_TASK:> Description:
def _get_subclass(name):
    """
    needed to resolve cyclic imports
    """
    return next(x for x in BaseContainer.__subclasses__() if x.__name__ == name)
<SYSTEM_TASK:> Creates a urllib3.PoolManager object that has SSL verification enabled <END_TASK> <USER_TASK:> Description:
def _default_make_pool(http, proxy_info):
    """Creates a urllib3.PoolManager object that has SSL verification enabled
    and uses the certifi certificates."""
    if not http.ca_certs:
        http.ca_certs = _certifi_where_for_ssl_version()

    ssl_disabled = http.disable_ssl_certificate_validation
    cert_reqs = 'CERT_REQUIRED' if http.ca_certs and not ssl_disabled else None

    if isinstance(proxy_info, collections.Callable):
        proxy_info = proxy_info()
    if proxy_info:
        if proxy_info.proxy_user and proxy_info.proxy_pass:
            proxy_url = 'http://{}:{}@{}:{}/'.format(
                proxy_info.proxy_user, proxy_info.proxy_pass,
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = urllib3.util.request.make_headers(
                proxy_basic_auth='{}:{}'.format(
                    proxy_info.proxy_user, proxy_info.proxy_pass,
                )
            )
        else:
            proxy_url = 'http://{}:{}/'.format(
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = {}

        return urllib3.ProxyManager(
            proxy_url=proxy_url,
            proxy_headers=proxy_headers,
            ca_certs=http.ca_certs,
            cert_reqs=cert_reqs,
        )
    return urllib3.PoolManager(
        ca_certs=http.ca_certs,
        cert_reqs=cert_reqs,
    )
<SYSTEM_TASK:> Monkey-patches httplib2.Http to be httplib2shim.Http. <END_TASK> <USER_TASK:> Description:
def patch(make_pool=_default_make_pool):
    """Monkey-patches httplib2.Http to be httplib2shim.Http.

    This effectively makes all clients of httplib2 use urllib3. It's
    preferable to specify httplib2shim.Http explicitly where you can,
    but this can be useful in situations where you do not control the
    construction of the http object.

    Args:
        make_pool: A function that returns a urllib3.Pool-like object. This
            allows you to specify special arguments to your connection pool if
            needed. By default, this will create a urllib3.PoolManager with
            SSL verification enabled using the certifi certificates.
    """
    setattr(httplib2, '_HttpOriginal', httplib2.Http)
    httplib2.Http = Http
    Http._make_pool = make_pool
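A usage sketch: after patching, existing httplib2 callers transparently go through urllib3:

    import httplib2shim
    httplib2shim.patch()

    import httplib2
    http = httplib2.Http()  # now an httplib2shim.Http backed by urllib3
    response, content = http.request('https://www.example.com/')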
<SYSTEM_TASK:> Checks if a given address is an IPv6 address. <END_TASK> <USER_TASK:> Description:
def _is_ipv6(addr):
    """Checks if a given address is an IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        return True
    except socket.error:
        return False
<SYSTEM_TASK:> Gets the right location for certifi certificates for the current SSL <END_TASK> <USER_TASK:> Description:
def _certifi_where_for_ssl_version():
    """Gets the right location for certifi certificates for the current SSL
    version.

    Older versions of SSL don't support the stronger set of root certificates.
    """
    if not ssl:
        return

    if ssl.OPENSSL_VERSION_INFO < (1, 0, 2):
        warnings.warn(
            'You are using an outdated version of OpenSSL that '
            'can\'t use stronger root certificates.')
        return certifi.old_where()

    return certifi.where()
<SYSTEM_TASK:> Properly close the AMQP connections <END_TASK> <USER_TASK:> Description:
def exit(self, signal=None, frame=None):
    """
    Properly close the AMQP connections
    """
    self.input_channel.close()
    self.client_queue.close()
    self.connection.close()
    log.info("Worker exiting")
    sys.exit(0)
<SYSTEM_TASK:> make amqp connection and create channels and queue binding <END_TASK> <USER_TASK:> Description:
def connect(self):
    """
    make amqp connection and create channels and queue binding
    """
    self.connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
    self.client_queue = ClientQueue()
    self.input_channel = self.connection.channel()

    self.input_channel.exchange_declare(exchange=self.INPUT_EXCHANGE,
                                        type='topic',
                                        durable=True)
    self.input_channel.queue_declare(queue=self.INPUT_QUEUE_NAME)
    self.input_channel.queue_bind(exchange=self.INPUT_EXCHANGE, queue=self.INPUT_QUEUE_NAME)
    log.info("Bind to queue named '%s' queue with exchange '%s'" %
             (self.INPUT_QUEUE_NAME, self.INPUT_EXCHANGE))
<SYSTEM_TASK:> clears out all messages from INPUT_QUEUE_NAME <END_TASK> <USER_TASK:> Description:
def clear_queue(self):
    """
    clears out all messages from INPUT_QUEUE_NAME
    """
    def remove_message(ch, method, properties, body):
        print("Removed message: %s" % body)

    self.input_channel.basic_consume(remove_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
<SYSTEM_TASK:> actual consuming of incoming work starts here <END_TASK> <USER_TASK:> Description:
def run(self):
    """
    actual consuming of incoming work starts here
    """
    self.input_channel.basic_consume(self.handle_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
<SYSTEM_TASK:> this is a pika.basic_consumer callback <END_TASK> <USER_TASK:> Description:
def handle_message(self, ch, method, properties, body):
    """
    this is a pika.basic_consumer callback;
    handles client inputs, runs appropriate workflows and views

    Args:
        ch: amqp channel
        method: amqp method
        properties:
        body: message body
    """
    input = {}
    headers = {}
    try:
        self.sessid = method.routing_key
        input = json_decode(body)
        data = input['data']

        # since this comes as "path" we dont know if it's view or workflow yet
        # TODO: just a workaround till we modify ui to
        if 'path' in data:
            if data['path'] in VIEW_METHODS:
                data['view'] = data['path']
            else:
                data['wf'] = data['path']
        session = Session(self.sessid)

        headers = {'remote_ip': input['_zops_remote_ip'],
                   'source': input['_zops_source']}

        if 'wf' in data:
            output = self._handle_workflow(session, data, headers)
        elif 'job' in data:
            self._handle_job(session, data, headers)
            return
        else:
            output = self._handle_view(session, data, headers)
    except HTTPError as e:
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        output = {"cmd": "error", "error": self._prepare_error_msg(e.message), "code": e.code}
        log.exception("Http error occurred")
    except:
        self.current = Current(session=session, input=data)
        self.current.headers = headers
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        err = traceback.format_exc()
        output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500}
        log.exception("Worker error occurred with message body:\n%s" % body)
    if 'callbackID' in input:
        output['callbackID'] = input['callbackID']
    log.info("OUTPUT for %s: %s" % (self.sessid, output))
    output['reply_timestamp'] = time()
    self.send_output(output)
<SYSTEM_TASK:> BG Job for storing wf state to DB <END_TASK> <USER_TASK:> Description:
def sync_wf_cache(current):
    """
    BG Job for storing wf state to DB
    """
    wf_cache = WFCache(current)
    wf_state = wf_cache.get()  # unicode serialized json to dict, all values are unicode
    if 'role_id' in wf_state:
        # role_id inserted by engine, so it's a sign that we get it from cache not db
        try:
            wfi = WFInstance.objects.get(key=current.input['token'])
        except ObjectDoesNotExist:
            # wf's that are not started from a task invitation
            wfi = WFInstance(key=current.input['token'])
            wfi.wf = BPMNWorkflow.objects.get(name=wf_state['name'])
        if not wfi.current_actor.exist:
            # we just started the wf
            try:
                inv = TaskInvitation.objects.get(instance=wfi, role_id=wf_state['role_id'])
                inv.delete_other_invitations()
                inv.progress = 20
                inv.save()
            except ObjectDoesNotExist:
                current.log.exception("Invitation not found: %s" % wf_state)
            except MultipleObjectsReturned:
                current.log.exception("Multiple invitations found: %s" % wf_state)
        wfi.step = wf_state['step']
        wfi.name = wf_state['name']
        wfi.pool = wf_state['pool']
        wfi.current_actor_id = str(wf_state['role_id'])  # keys must be str not unicode
        wfi.data = wf_state['data']
        if wf_state['finished']:
            wfi.finished = True
            wfi.finish_date = wf_state['finish_date']
            wf_cache.delete()
        wfi.save()
    else:
        # if cache is already cleared, we have nothing to sync
        pass
<SYSTEM_TASK:> Tries to get WF description from 'collaboration' or 'process' or 'participant' <END_TASK> <USER_TASK:> Description:
def get_description(self):
    """
    Tries to get WF description from 'collaboration' or 'process' or 'participant'

    Returns:
        str: WF description
    """
    paths = ['bpmn:collaboration/bpmn:participant/bpmn:documentation',
             'bpmn:collaboration/bpmn:documentation',
             'bpmn:process/bpmn:documentation']
    for path in paths:
        elm = self.root.find(path, NS)
        if elm is not None and elm.text:
            return elm.text
<SYSTEM_TASK:> will create a WFInstance per object <END_TASK> <USER_TASK:> Description:
def create_tasks(self):
    """
    will create a WFInstance per object
    and a TaskInvitation for each role and WFInstance
    """
    roles = self.get_roles()
    if self.task_type in ["A", "D"]:
        instances = self.create_wf_instances(roles=roles)
        self.create_task_invitation(instances)
    elif self.task_type in ["C", "B"]:
        instances = self.create_wf_instances()
        self.create_task_invitation(instances, roles)
<SYSTEM_TASK:> Fetches model objects by filtering with kwargs <END_TASK> <USER_TASK:> Description:
def get_model_objects(model, wfi_role=None, **kwargs):
    """
    Fetches model objects by filtering with kwargs

    If wfi_role is specified, then we expect kwargs to contain a filter value
    starting with role, e.g. {'user': 'role.program.user'}

    We replace this `role` key with the role instance parameter `wfi_role` and
    try to get the object that the filter value 'role.program.user' points to
    by iterating `getattribute`. At the end the filter argument becomes
    {'user': user}.

    Args:
        model (Model): Model class
        wfi_role (Role): role instance of wf instance
        **kwargs: filter arguments

    Returns:
        (list): list of model object instances
    """
    query_dict = {}
    for k, v in kwargs.items():
        if isinstance(v, list):
            query_dict[k] = [str(x) for x in v]
        else:
            parse = str(v).split('.')
            if parse[0] == 'role' and wfi_role:
                query_dict[k] = wfi_role
                for i in range(1, len(parse)):
                    query_dict[k] = query_dict[k].__getattribute__(parse[i])
            else:
                query_dict[k] = parse[0]

    return model.objects.all(**query_dict)
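To illustrate the role-path resolution (a sketch; the model and attribute names are hypothetical):

    # With wfi_role given, 'role.program.user' walks wfi_role.program.user,
    # so the filter effectively becomes {'user': <User instance>}:
    objs = get_model_objects(SomeModel, wfi_role=some_role, user='role.program.user')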
<SYSTEM_TASK:> can be removed when a proper task manager admin interface is implemented <END_TASK> <USER_TASK:> Description:
def post_save(self):
    """can be removed when a proper task manager admin interface is implemented"""
    if self.run:
        self.run = False
        self.create_tasks()
        self.save()
<SYSTEM_TASK:> When one person uses an invitation, we should delete the other invitations <END_TASK> <USER_TASK:> Description:
def delete_other_invitations(self):
    """
    When one person uses an invitation, we should delete the other invitations
    """
    # TODO: Signal logged-in users to remove the task from their task list
    self.objects.filter(instance=self.instance).exclude(key=self.key).delete()
<SYSTEM_TASK:> write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache <END_TASK> <USER_TASK:> Description:
def save(self, wf_state):
    """
    write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache

    Args:
        wf_state dict: wf state
    """
    self.wf_state = wf_state
    self.wf_state['role_id'] = self.current.role_id
    self.set(self.wf_state)
    if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS:
        self.publish(job='_zops_sync_wf_cache', token=self.db_key)
<SYSTEM_TASK:> Send messages through logged-in user's private exchange. <END_TASK> <USER_TASK:> Description:
def send_to_prv_exchange(self, user_id, message=None):
    """
    Send messages through logged-in user's private exchange.

    Args:
        user_id string: User key
        message dict: Message object
    """
    exchange = 'prv_%s' % user_id.lower()
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg))
    self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
<SYSTEM_TASK:> decompose CGR to pair of Molecules, which represent the reactants and products state of reaction <END_TASK> <USER_TASK:> Description:
def decompose(self):
    """
    decompose CGR to a pair of Molecules, which represent the reactants
    and products state of the reaction

    :return: tuple of two molecules
    """
    mc = self._get_subclass('MoleculeContainer')
    reactants = mc()
    products = mc()

    for n, atom in self.atoms():
        reactants.add_atom(atom._reactant, n)
        products.add_atom(atom._product, n)

    for n, m, bond in self.bonds():
        if bond._reactant is not None:
            reactants.add_bond(n, m, bond._reactant)
        if bond._product is not None:
            products.add_bond(n, m, bond._product)
    return reactants, products
<SYSTEM_TASK:> Return a DataFrame, <END_TASK> <USER_TASK:> Description:
def size_history(self, size_data):
    """Return a DataFrame, indexed by day, with columns containing
    story size for each issue.

    In addition, columns are sorted by Jira Issue key:
    first by Project and then by id number.
    """
    def my_merge(df1, df2):
        # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes
        res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True)
        cols = sorted(res.columns)
        pairs = []
        for col1, col2 in zip(cols[:-1], cols[1:]):
            if col1.endswith('_x') and col2.endswith('_y'):
                pairs.append((col1, col2))
        for col1, col2 in pairs:
            res[col1[:-2]] = res[col1].combine_first(res[col2])
            res = res.drop([col1, col2], axis=1)
        return res

    dfs_key = []
    # Group the dataframe by issue key, and build one frame per issue
    for name, group in size_data.groupby('key'):
        dfs = []
        for row in group.itertuples():
            # print(row.Index, row.fromDate, row.toDate, row.size)
            dates = pd.date_range(start=row.fromDate, end=row.toDate)
            sizes = [row.size] * len(dates)
            data = {'date': dates, 'size': sizes}
            df2 = pd.DataFrame(data, columns=['date', 'size'])
            pd.to_datetime(df2['date'], format=('%Y-%m-%d'))
            df2.set_index(['date'], inplace=True)
            dfs.append(df2)
        # df_final = reduce(lambda left,right: pd.merge(left,right), dfs)
        df_key = (reduce(my_merge, dfs))
        df_key.columns = [name if x == 'size' else x for x in df_key.columns]
        dfs_key.append(df_key)
    df_all = (reduce(my_merge, dfs_key))

    # Sort the columns based on Jira Project code and issue number
    mykeys = df_all.columns.values.tolist()
    mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6))
    df_all = df_all[mykeys]

    # Reindex to make sure we have all dates
    start, end = df_all.index.min(), df_all.index.max()
    df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill')
    return df_all
<SYSTEM_TASK:> Return histogram data for the cycle times in `cycle_data`. Returns <END_TASK> <USER_TASK:> Description:
def histogram(self, cycle_data, bins=10):
    """Return histogram data for the cycle times in `cycle_data`. Returns
    a pandas Series of item counts, indexed by bin range labels.
    """
    values, edges = np.histogram(
        cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins)

    index = []
    for i, v in enumerate(edges):
        if i == 0:
            continue
        index.append("%.01f to %.01f" % (edges[i - 1], edges[i],))

    return pd.Series(values, name="Items", index=index)
<SYSTEM_TASK:> Return scatterplot data for the cycle times in `cycle_data`. Returns <END_TASK> <USER_TASK:> Description:
def scatterplot(self, cycle_data):
    """Return scatterplot data for the cycle times in `cycle_data`. Returns
    a data frame containing only those items in `cycle_data` where values
    are set for `completed_timestamp` and `cycle_time`, and with those two
    columns as the first two, both normalised to whole days, and with
    `completed_timestamp` renamed to `completed_date`.
    """
    columns = list(cycle_data.columns)
    columns.remove('cycle_time')
    columns.remove('completed_timestamp')
    columns = ['completed_timestamp', 'cycle_time'] + columns

    data = (
        cycle_data[columns]
        .dropna(subset=['cycle_time', 'completed_timestamp'])
        .rename(columns={'completed_timestamp': 'completed_date'})
    )

    data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]')
    data['completed_date'] = data['completed_date'].map(pd.Timestamp.date)

    return data
<SYSTEM_TASK:> get a list of lists of atoms of reaction centers <END_TASK> <USER_TASK:> Description:
def centers_list(self):
    """ get a list of lists of atoms of reaction centers """
    center = set()
    adj = defaultdict(set)
    for n, atom in self.atoms():
        if atom._reactant != atom._product:
            center.add(n)

    for n, m, bond in self.bonds():
        if bond._reactant != bond._product:
            adj[n].add(m)
            adj[m].add(n)
            center.add(n)
            center.add(m)

    out = []
    while center:
        n = center.pop()
        if n in adj:
            c = set(self.__plain_bfs(adj, n))
            out.append(list(c))
            center.difference_update(c)
        else:
            out.append([n])
    return out
<SYSTEM_TASK:> Returns authorization token provided by Cocaine. <END_TASK> <USER_TASK:> Description:
def token(self):
    """
    Returns authorization token provided by Cocaine.

    The real meaning of the token is determined by its type. For example
    an OAUTH2 token will have "bearer" type.

    :return: A tuple of token type and body.
    """
    if self._token is None:
        token_type = os.getenv(TOKEN_TYPE_KEY, '')
        token_body = os.getenv(TOKEN_BODY_KEY, '')
        self._token = _Token(token_type, token_body)
    return self._token
<SYSTEM_TASK:> Send a message lazily formatted with args. <END_TASK> <USER_TASK:> Description:
def _send(self):
    """
    Send a message lazily formatted with args.

    External log attributes can be passed via named attribute `extra`,
    like in logging from the standard library.

    Note:
        * Attrs must be dict, otherwise the whole message would be skipped.
        * The key field in an attr is converted to string.
        * The value is sent as is if isinstance of (str, unicode, int, float, long, bool),
          otherwise we convert the value to string.
    """
    buff = BytesIO()
    while True:
        msgs = list()
        try:
            msg = yield self.queue.get()
            # we need to connect first, as we issue verbosity request just after connection
            # and channels should strictly go in ascending order
            if not self._connected:
                yield self.connect()

            try:
                while True:
                    msgs.append(msg)
                    counter = next(self.counter)
                    msgpack_pack([counter, EMIT, msg], buff)
                    msg = self.queue.get_nowait()
            except queues.QueueEmpty:
                pass

            try:
                yield self.pipe.write(buff.getvalue())
            except Exception:
                pass
            # clean the buffer or we will end up without memory
            buff.truncate(0)
        except Exception:
            for message in msgs:
                self._log_to_fallback(message)
<SYSTEM_TASK:> Finds moves in a given direction <END_TASK> <USER_TASK:> Description:
def moves_in_direction(self, direction, position):
    """
    Finds moves in a given direction

    :type: direction: lambda
    :type: position: Board
    :rtype: list
    """
    current_square = self.location
    while True:
        try:
            current_square = direction(current_square)
        except IndexError:
            return

        if self.contains_opposite_color_piece(current_square, position):
            yield self.create_move(current_square, notation_const.CAPTURE)

        if not position.is_square_empty(current_square):
            return

        yield self.create_move(current_square, notation_const.MOVEMENT)
<SYSTEM_TASK:> Returns all possible rook moves. <END_TASK> <USER_TASK:> Description:
def possible_moves(self, position):
    """
    Returns all possible rook moves.

    :type: position: Board
    :rtype: list
    """
    for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]):
        yield move
<SYSTEM_TASK:> Check overlap between two arrays. <END_TASK> <USER_TASK:> Description:
def overlap_status(a, b):
    """Check overlap between two arrays.

    Parameters
    ----------
    a, b : array-like
        Arrays to check. Assumed to be in the same unit.

    Returns
    -------
    result : {'full', 'partial', 'none'}
        * 'full' - ``a`` is within or same as ``b``
        * 'partial' - ``a`` partially overlaps with ``b``
        * 'none' - ``a`` does not overlap ``b``
    """
    # Get the endpoints
    a1, a2 = a.min(), a.max()
    b1, b2 = b.min(), b.max()

    # Do the comparison
    if a1 >= b1 and a2 <= b2:
        result = 'full'
    elif a2 < b1 or b2 < a1:
        result = 'none'
    else:
        result = 'partial'

    return result
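For example (a quick check, assuming numpy arrays):

    import numpy as np

    overlap_status(np.array([3, 4, 5]), np.array([1, 10]))   # 'full'
    overlap_status(np.array([8, 12]), np.array([1, 10]))     # 'partial'
    overlap_status(np.array([20, 30]), np.array([1, 10]))    # 'none'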
<SYSTEM_TASK:> Check integrated flux for invalid values. <END_TASK> <USER_TASK:> Description:
def validate_totalflux(totalflux):
    """Check integrated flux for invalid values.

    Parameters
    ----------
    totalflux : float
        Integrated flux.

    Raises
    ------
    synphot.exceptions.SynphotError
        Input is zero, negative, or not a number.
    """
    if totalflux <= 0.0:
        raise exceptions.SynphotError('Integrated flux is <= 0')
    elif np.isnan(totalflux):
        raise exceptions.SynphotError('Integrated flux is NaN')
    elif np.isinf(totalflux):
        raise exceptions.SynphotError('Integrated flux is infinite')
<SYSTEM_TASK:> Check wavelengths for ``synphot`` compatibility. <END_TASK> <USER_TASK:> Description: def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """
if isinstance(wavelengths, u.Quantity):
    units.validate_wave_unit(wavelengths.unit)
    wave = wavelengths.value
else:
    wave = wavelengths

if np.isscalar(wave):
    wave = [wave]

wave = np.asarray(wave)

# Check for zeroes
if np.any(wave <= 0):
    raise exceptions.ZeroWavelength(
        'Negative or zero wavelength occurs in wavelength array',
        rows=np.where(wave <= 0)[0])

# Check for monotonicity
sorted_wave = np.sort(wave)
if not np.all(sorted_wave == wave):
    if np.all(sorted_wave[::-1] == wave):
        pass  # Monotonic descending is allowed
    else:
        raise exceptions.UnsortedWavelength(
            'Wavelength array is not monotonic',
            rows=np.where(sorted_wave != wave)[0])

# Check for duplicate values
if wave.size > 1:
    dw = sorted_wave[1:] - sorted_wave[:-1]
    if np.any(dw == 0):
        raise exceptions.DuplicateWavelength(
            'Wavelength array contains duplicate entries',
            rows=np.where(dw == 0)[0])
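A short usage sketch (not from the source; assumes the same module-level `exceptions` import as the code above):

import numpy as np

validate_wavelengths(np.array([1000.0, 2000.0, 3000.0]))  # passes silently

try:
    validate_wavelengths(np.array([1000.0, 1000.0, 3000.0]))
except exceptions.DuplicateWavelength as exc:
    print(exc)  # duplicate entries are reported with their row indices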
<SYSTEM_TASK:>
Generate wavelength array to be used for spectrum sampling.
<END_TASK>
<USER_TASK:>
Description:
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None,
                         log=True, wave_unit=u.AA):
    """Generate wavelength array to be used for spectrum sampling.

    .. math::

        minwave \\le \\lambda < maxwave

    Parameters
    ----------
    minwave, maxwave : float
        Lower and upper limits of the wavelengths.
        These must be values in linear space regardless of ``log``.

    num : int
        The number of wavelength values.
        This is only used when ``delta=None``.

    delta : float or `None`
        Delta between wavelength values.
        When ``log=True``, this is the spacing in log space.

    log : bool
        If `True`, the wavelength values are evenly spaced in log scale.
        Otherwise, spacing is linear.

    wave_unit : str or `~astropy.units.core.Unit`
        Wavelength unit. Default is Angstrom.

    Returns
    -------
    waveset : `~astropy.units.quantity.Quantity`
        Generated wavelength set.

    waveset_str : str
        Info string associated with the result.

    """
wave_unit = units.validate_unit(wave_unit)

if delta is not None:
    num = None

waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format(
    minwave, maxwave, num, delta, log)

# Log space
if log:
    logmin = np.log10(minwave)
    logmax = np.log10(maxwave)
    if delta is None:
        waveset = np.logspace(logmin, logmax, num, endpoint=False)
    else:
        waveset = 10 ** np.arange(logmin, logmax, delta)

# Linear space
else:
    if delta is None:
        waveset = np.linspace(minwave, maxwave, num, endpoint=False)
    else:
        waveset = np.arange(minwave, maxwave, delta)

return waveset.astype(np.float64) * wave_unit, waveset_str
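A usage sketch (not from the source), generating five log-spaced points; note the upper limit is excluded:

waveset, info = generate_wavelengths(minwave=1000, maxwave=10000, num=5)
print(info)     # 'Min: 1000, Max: 10000, Num: 5, Delta: None, Log: True'
print(waveset)  # [1000., ~1584.89, ~2511.89, ~3981.07, ~6309.57] Angstrom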
<SYSTEM_TASK:>
Download CDBS data files to given root directory.
<END_TASK>
<USER_TASK:>
Description:
def download_data(cdbs_root, verbose=True, dry_run=False):
    """Download CDBS data files to given root directory.
    Download is skipped if a data file already exists.

    Parameters
    ----------
    cdbs_root : str
        Root directory for CDBS data files.

    verbose : bool
        Print extra information to screen.

    dry_run : bool
        Go through the logic but skip the actual download.
        This would return a list of files that *would have been*
        downloaded without network calls.
        Use this option for debugging or testing.

    Raises
    ------
    OSError
        Problem with directory.

    Returns
    -------
    file_list : list of str
        A list of downloaded files.

    """
from .config import conf  # Avoid potential circular import

if not os.path.exists(cdbs_root):
    os.makedirs(cdbs_root, exist_ok=True)
    if verbose:  # pragma: no cover
        print('Created {}'.format(cdbs_root))
elif not os.path.isdir(cdbs_root):
    raise OSError('{} must be a directory'.format(cdbs_root))

host = 'http://ssb.stsci.edu/cdbs/'
file_list = []

if not cdbs_root.endswith(os.sep):
    cdbs_root += os.sep

# See https://github.com/astropy/astropy/issues/8524
for cfgitem in conf.__class__.__dict__.values():
    if (not isinstance(cfgitem, ConfigItem) or
            not cfgitem.name.endswith('file')):
        continue

    url = cfgitem()
    if not url.startswith(host):
        if verbose:  # pragma: no cover
            print('{} is not from {}, skipping download'.format(url, host))
        continue

    dst = url.replace(host, cdbs_root).replace('/', os.sep)

    if os.path.exists(dst):
        if verbose:  # pragma: no cover
            print('{} already exists, skipping download'.format(dst))
        continue

    # Create sub-directories, if needed.
    subdirs = os.path.dirname(dst)
    os.makedirs(subdirs, exist_ok=True)

    if not dry_run:  # pragma: no cover
        try:
            src = download_file(url)
            copyfile(src, dst)
        except Exception as exc:
            print('Download failed - {}'.format(str(exc)))
            continue

    file_list.append(dst)

    if verbose:  # pragma: no cover
        print('{} downloaded to {}'.format(url, dst))

return file_list
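A usage sketch (not from the source; the target directory is hypothetical, and no network call is made because of `dry_run=True`):

would_download = download_data('/tmp/cdbs', verbose=False, dry_run=True)
for path in would_download:
    print(path)  # destination paths that a real run would fetch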
<SYSTEM_TASK:>
Add Node, replace existing node if node with node_id is present.
<END_TASK>
<USER_TASK:>
Description:
def add(self, node):
    """Add Node, replace existing node if node with node_id is present."""
if not isinstance(node, Node):
    raise TypeError('node must be an instance of Node')
for i, j in enumerate(self.__nodes):
    if j.node_id == node.node_id:
        self.__nodes[i] = node  # replace in place, keep order
        return
self.__nodes.append(node)
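A minimal sketch (not from the source) of the replace-by-id behaviour, using stand-in classes with the same `node_id` contract:

class FakeNode:
    # stand-in for the real Node class
    def __init__(self, node_id, name):
        self.node_id = node_id
        self.name = name

class FakeNodes:
    # stand-in container with the same add() semantics
    def __init__(self):
        self._nodes = []

    def add(self, node):
        for i, existing in enumerate(self._nodes):
            if existing.node_id == node.node_id:
                self._nodes[i] = node  # replace, do not append
                return
        self._nodes.append(node)

nodes = FakeNodes()
nodes.add(FakeNode(1, 'old'))
nodes.add(FakeNode(1, 'new'))  # same id: replaces the first entry
assert len(nodes._nodes) == 1 and nodes._nodes[0].name == 'new'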
<SYSTEM_TASK:>
Load nodes from KLF 200, if no node_id is specified all nodes are loaded.
<END_TASK>
<USER_TASK:>
Description:
async def load(self, node_id=None):
    """Load nodes from KLF 200, if no node_id is specified all nodes are loaded."""
if node_id is not None:
    await self._load_node(node_id=node_id)
else:
    await self._load_all_nodes()
<SYSTEM_TASK:>
Pulse every timeout seconds until stopped.
<END_TASK>
<USER_TASK:>
Description:
async def loop(self):
    """Pulse every timeout seconds until stopped."""
while not self.stopped:
    self.timeout_handle = self.pyvlx.connection.loop.call_later(
        self.timeout_in_seconds, self.loop_timeout)
    await self.loop_event.wait()
    if not self.stopped:
        self.loop_event.clear()
        await self.pulse()
        self.cancel_loop_timeout()
self.stopped_event.set()
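A self-contained sketch of the same heartbeat pattern with plain asyncio (hypothetical names, not the pyvlx API):

import asyncio

class Heartbeat:
    def __init__(self, interval=1.0):
        self.interval = interval
        self.stopped = False
        self.wakeup = asyncio.Event()

    async def loop(self):
        while not self.stopped:
            try:
                # wake up early if someone sets the event, else on timeout
                await asyncio.wait_for(self.wakeup.wait(), self.interval)
            except asyncio.TimeoutError:
                pass
            if not self.stopped:
                self.wakeup.clear()
                await self.pulse()

    async def pulse(self):
        print('keep-alive ping')

async def main():
    hb = Heartbeat(interval=0.2)
    task = asyncio.create_task(hb.loop())
    await asyncio.sleep(0.7)   # let a few pulses happen
    hb.stopped = True
    hb.wakeup.set()            # unblock the loop so it can exit
    await task

asyncio.run(main())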
<SYSTEM_TASK:>
Send get state request to API to keep the connection alive.
<END_TASK>
<USER_TASK:>
Description:
async def pulse(self):
    """Send get state request to API to keep the connection alive."""
get_state = GetState(pyvlx=self.pyvlx)
await get_state.do_api_call()
if not get_state.success:
    raise PyVLXException("Unable to send get state.")
<SYSTEM_TASK:>
An 'argument type' for integrations with the argparse module.
<END_TASK>
<USER_TASK:>
Description:
def BitmathType(bmstring):
    """An 'argument type' for integrations with the argparse module.

    For more information, see
    https://docs.python.org/2/library/argparse.html#type

    Of particular interest to us is this bit:

       ``type=`` can take any callable that takes a single string
       argument and returns the converted value

    I.e., ``type`` can be a function (such as this function) or a class
    which implements the ``__call__`` method.

    Example usage of the bitmath.BitmathType argparse type:

    >>> import bitmath
    >>> import argparse
    >>> parser = argparse.ArgumentParser()
    >>> parser.add_argument("--file-size", type=bitmath.BitmathType)
    >>> parser.parse_args("--file-size 1337MiB".split())
    Namespace(file_size=MiB(1337.0))

    Invalid usage includes any input that the bitmath.parse_string
    function already rejects. Additionally, **UNQUOTED** arguments with
    spaces in them are rejected (shlex.split is used in the following
    examples to conserve single quotes in the parse_args call):

    >>> parser = argparse.ArgumentParser()
    >>> parser.add_argument("--file-size", type=bitmath.BitmathType)
    >>> import shlex
    >>> # The following is ACCEPTABLE USAGE:
    ...
    >>> parser.parse_args(shlex.split("--file-size '1337 MiB'"))
    Namespace(file_size=MiB(1337.0))
    >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted!
    ...
    >>> parser.parse_args(shlex.split("--file-size 1337 MiB"))
    error: argument --file-size: 1337 can not be parsed into a valid bitmath object
    """
try:
    argvalue = bitmath.parse_string(bmstring)
except ValueError:
    raise argparse.ArgumentTypeError(
        "'%s' can not be parsed into a valid bitmath object" % bmstring)
else:
    return argvalue
<SYSTEM_TASK:>
Setup apiv2 when using PyQt4 and Python2.
<END_TASK>
<USER_TASK:>
Description:
def setup_apiv2():
    """
    Setup apiv2 when using PyQt4 and Python2.
    """
# setup PyQt api to version 2
if sys.version_info[0] == 2:
    logging.getLogger(__name__).debug(
        'setting up SIP API to version 2')
    import sip
    try:
        sip.setapi("QString", 2)
        sip.setapi("QVariant", 2)
    except ValueError:
        logging.getLogger(__name__).critical(
            "failed to set up sip api to version 2 for PyQt4")
        raise ImportError('PyQt4')
<SYSTEM_TASK:>
Read roller shutter from config.
<END_TASK>
<USER_TASK:>
Description:
def from_config(cls, pyvlx, item):
    """Read roller shutter from config."""
name = item['name']
ident = item['id']
subtype = item['subtype']
typeid = item['typeId']
return cls(pyvlx, ident, name, subtype, typeid)
<SYSTEM_TASK:>
Read FITS or ASCII spectrum from a remote location.
<END_TASK>
<USER_TASK:>
Description:
def read_remote_spec(filename, encoding='binary', cache=True,
                     show_progress=True, **kwargs):
    """Read FITS or ASCII spectrum from a remote location.

    Parameters
    ----------
    filename : str
        Spectrum filename.

    encoding, cache, show_progress
        See :func:`~astropy.utils.data.get_readable_fileobj`.

    kwargs : dict
        Keywords acceptable by :func:`read_fits_spec` (if FITS) or
        :func:`read_ascii_spec` (if ASCII).

    Returns
    -------
    header : dict
        Metadata.

    wavelengths, fluxes : `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.

    """
with get_readable_fileobj(filename, encoding=encoding, cache=cache,
                          show_progress=show_progress) as fd:
    header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs)
return header, wavelengths, fluxes
<SYSTEM_TASK:>
Read FITS or ASCII spectrum.
<END_TASK>
<USER_TASK:>
Description:
def read_spec(filename, fname='', **kwargs):
    """Read FITS or ASCII spectrum.

    Parameters
    ----------
    filename : str or file pointer
        Spectrum file name or pointer.

    fname : str
        Filename. This is *only* used if ``filename`` is a pointer.

    kwargs : dict
        Keywords acceptable by :func:`read_fits_spec` (if FITS) or
        :func:`read_ascii_spec` (if ASCII).

    Returns
    -------
    header : dict
        Metadata.

    wavelengths, fluxes : `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.

    Raises
    ------
    synphot.exceptions.SynphotError
        Read failed.

    """
if isinstance(filename, str):
    fname = filename
elif not fname:  # pragma: no cover
    raise exceptions.SynphotError('Cannot determine filename.')

if fname.endswith('fits') or fname.endswith('fit'):
    read_func = read_fits_spec
else:
    read_func = read_ascii_spec

return read_func(filename, **kwargs)
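A usage sketch (hypothetical filenames, not from the source) showing the extension-based dispatch:

# '.fits' / '.fit' suffixes route to read_fits_spec; anything else
# falls through to the ASCII reader
hdr, wave, flux = read_spec('vega.fits')      # FITS path
hdr, wave, flux = read_spec('spectrum.txt')   # ASCII path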
<SYSTEM_TASK:>
Read ASCII spectrum.
<END_TASK>
<USER_TASK:>
Description:
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs):
    """Read ASCII spectrum.

    ASCII table must have following columns:

        #. Wavelength data
        #. Flux data

    It can have more than 2 columns but the rest is ignored.
    Comments are discarded.

    Parameters
    ----------
    filename : str or file pointer
        Spectrum file name or pointer.

    wave_unit, flux_unit : str or `~astropy.units.core.Unit`
        Wavelength and flux units, which default to Angstrom and FLAM,
        respectively.

    kwargs : dict
        Keywords accepted by :func:`astropy.io.ascii.ui.read`.

    Returns
    -------
    header : dict
        This is just an empty dictionary, so returned values
        are the same as :func:`read_fits_spec`.

    wavelengths, fluxes : `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.
        They are set to 'float64' precision.

    """
header = {}
dat = ascii.read(filename, **kwargs)

wave_unit = units.validate_unit(wave_unit)
flux_unit = units.validate_unit(flux_unit)

wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit
fluxes = dat.columns[1].data.astype(np.float64) * flux_unit

return header, wavelengths, fluxes
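A usage sketch (not from the source), reading a tiny two-column table from an in-memory file object and relying on astropy's format guessing for header-less numeric data:

from io import StringIO

table = StringIO("1000.0  1.0e-14\n2000.0  2.0e-14\n")
hdr, wave, flux = read_ascii_spec(table)
print(wave)  # [1000. 2000.] Angstrom
print(flux)  # [1.e-14 2.e-14] FLAM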