<SYSTEM_TASK:> Logical shift right memory location <END_TASK> <USER_TASK:> Description: def instruction_LSR_memory(self, opcode, ea, m): """ Logical shift right memory location """
r = self.LSR(m)
# log.debug("$%x LSR memory value $%x >> 1 = $%x and write it to $%x \t| %s" % (
#     self.program_counter, m, r, ea, self.cfg.mem_info.get_shortest(ea)))
return ea, r & 0xff
<SYSTEM_TASK:> Arithmetic shift memory right <END_TASK> <USER_TASK:> Description: def instruction_ASR_memory(self, opcode, ea, m): """ Arithmetic shift memory right """
r = self.ASR(m)
# log.debug("$%x ASR memory value $%x >> 1 | Carry = $%x and write it to $%x \t| %s" % (
#     self.program_counter, m, r, ea, self.cfg.mem_info.get_shortest(ea)))
return ea, r & 0xff
<SYSTEM_TASK:> Arithmetic shift accumulator right <END_TASK> <USER_TASK:> Description: def instruction_ASR_register(self, opcode, register): """ Arithmetic shift accumulator right """
a = register.value
r = self.ASR(a)
# log.debug("$%x ASR %s value $%x >> 1 | Carry = $%x" % (
#     self.program_counter, register.name, a, r))
register.set(r)
<SYSTEM_TASK:> Rotate accumulator left <END_TASK> <USER_TASK:> Description: def instruction_ROL_register(self, opcode, register): """ Rotate accumulator left """
a = register.value
r = self.ROL(a)
# log.debug("$%x ROL %s value $%x << 1 | Carry = $%x" % (
#     self.program_counter, register.name, a, r))
register.set(r)
<SYSTEM_TASK:> Calculate the address for all indexed addressing modes <END_TASK> <USER_TASK:> Description: def get_ea_indexed(self): """ Calculate the address for all indexed addressing modes """
addr, postbyte = self.read_pc_byte()
# log.debug("\tget_ea_indexed(): postbyte: $%02x (%s) from $%04x",
#     postbyte, byte2bit_string(postbyte), addr)

rr = (postbyte >> 5) & 3
try:
    register_str = self.INDEX_POSTBYTE2STR[rr]
except KeyError:
    raise RuntimeError("Register $%x doesn't exist! (postbyte: $%x)" % (rr, postbyte))

register_obj = self.register_str2object[register_str]
register_value = register_obj.value
# log.debug("\t%02x == register %s: value $%x", rr, register_obj.name, register_value)

if not is_bit_set(postbyte, bit=7):  # bit 7 == 0
    # EA = n, R - use 5-bit offset from post-byte
    offset = signed5(postbyte & 0x1f)
    ea = register_value + offset
    # log.debug("\tget_ea_indexed(): bit 7 == 0: reg.value: $%04x -> ea=$%04x + $%02x = $%04x",
    #     register_value, register_value, offset, ea)
    return ea

addr_mode = postbyte & 0x0f
self.cycles += 1
offset = None

# TODO: Optimize this, maybe use a dict mapping...
if addr_mode == 0x0:
    # 0000 0x0 | ,R+ | increment by 1
    ea = register_value
    register_obj.increment(1)
elif addr_mode == 0x1:
    # 0001 0x1 | ,R++ | increment by 2
    ea = register_value
    register_obj.increment(2)
    self.cycles += 1
elif addr_mode == 0x2:
    # 0010 0x2 | ,-R | decrement by 1
    register_obj.decrement(1)
    ea = register_obj.value
elif addr_mode == 0x3:
    # 0011 0x3 | ,--R | decrement by 2
    register_obj.decrement(2)
    ea = register_obj.value
    self.cycles += 1
elif addr_mode == 0x4:
    # 0100 0x4 | ,R | no offset
    ea = register_value
elif addr_mode == 0x5:
    # 0101 0x5 | B,R | B register offset
    offset = signed8(self.accu_b.value)
elif addr_mode == 0x6:
    # 0110 0x6 | A,R | A register offset
    offset = signed8(self.accu_a.value)
elif addr_mode == 0x8:
    # 1000 0x8 | n,R | 8-bit offset
    offset = signed8(self.read_pc_byte()[1])
elif addr_mode == 0x9:
    # 1001 0x9 | n,R | 16-bit offset
    offset = signed16(self.read_pc_word()[1])
    self.cycles += 1
elif addr_mode == 0xa:
    # 1010 0xa | illegal, set ea=0
    ea = 0
elif addr_mode == 0xb:
    # 1011 0xb | D,R | D register offset
    # D - 16-bit concatenated reg. (A + B)
    offset = signed16(self.accu_d.value)  # FIXME: signed16() ok?
    self.cycles += 1
elif addr_mode == 0xc:
    # 1100 0xc | n,PCR | 8-bit offset from program counter
    __, value = self.read_pc_byte()
    value_signed = signed8(value)
    ea = self.program_counter.value + value_signed
    # log.debug("\tea = pc($%x) + $%x = $%x", self.program_counter, value_signed, ea)
elif addr_mode == 0xd:
    # 1101 0xd | n,PCR | 16-bit offset from program counter
    __, value = self.read_pc_word()
    value_signed = signed16(value)
    ea = self.program_counter.value + value_signed
    self.cycles += 1
    # log.debug("\tea = pc($%x) + $%x = $%x", self.program_counter, value_signed, ea)
elif addr_mode == 0xe:
    # log.error("\tget_ea_indexed(): illegal address mode, use 0xffff")
    ea = 0xffff  # illegal
elif addr_mode == 0xf:
    # 1111 0xf | [n] | 16-bit address - extended indirect
    __, ea = self.read_pc_word()
else:
    raise RuntimeError("Illegal indexed addressing mode: $%x" % addr_mode)

if offset is not None:
    ea = register_value + offset
    # log.debug("\t$%x + $%x = $%x", register_value, offset, ea)

ea = ea & 0xffff

if is_bit_set(postbyte, bit=4):  # bit 4 is 1 -> indirect
    # log.debug("\tIndirect addressing: get new ea from $%x", ea)
    ea = self.memory.read_word(ea)

# log.debug("\tget_ea_indexed(): return ea=$%x", ea)
return ea
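The decoder above leans on a few helpers (signed5, signed8, signed16, is_bit_set) that are not part of this snippet. A minimal sketch of what their call sites imply, namely standard two's-complement sign extension and a bit test; the real project may implement them differently:

def signed5(x):
    """Convert a 5-bit value to its two's-complement signed equivalent."""
    return x - 0x20 if x & 0x10 else x

def signed8(x):
    """Convert an 8-bit value to its two's-complement signed equivalent."""
    return x - 0x100 if x & 0x80 else x

def signed16(x):
    """Convert a 16-bit value to its two's-complement signed equivalent."""
    return x - 0x10000 if x & 0x8000 else x

def is_bit_set(value, bit):
    """Return True if the given bit (0-indexed) is set in value."""
    return bool(value & (1 << bit))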
<SYSTEM_TASK:> Get the Python interpreter we need to use to run our Notebook daemon. <END_TASK> <USER_TASK:> Description: def discover_python(self): """Get the Python interpreter we need to use to run our Notebook daemon."""
python = sys.executable

#: XXX fix this hack: uwsgi sets itself as the Python executable
#: TODO: implement better Python interpreter autodiscovery
if python.endswith("/uwsgi"):
    python = python.replace("/uwsgi", "/python")

return python
<SYSTEM_TASK:> Get PID file name for a named notebook. <END_TASK> <USER_TASK:> Description: def get_pid(self, name): """Get PID file name for a named notebook."""
pid_file = os.path.join(self.get_work_folder(name), "notebook.pid")
return pid_file
<SYSTEM_TASK:> Assume we launch Notebook with the same Python which executed us. <END_TASK> <USER_TASK:> Description: def get_notebook_daemon_command(self, name, action, port=0, *extra): """ Assume we launch Notebook with the same Python which executed us. """
return [self.python, self.cmd, action, self.get_pid(name),
        self.get_work_folder(name), port, self.kill_timeout] + list(extra)
<SYSTEM_TASK:> Get the running named Notebook status. <END_TASK> <USER_TASK:> Description: def get_notebook_status(self, name): """Get the running named Notebook status. :return: None if no notebook is running, otherwise context dictionary """
context = comm.get_context(self.get_pid(name))
if not context:
    return None
return context
<SYSTEM_TASK:> Start new IPython Notebook daemon. <END_TASK> <USER_TASK:> Description: def start_notebook(self, name, context: dict, fg=False): """Start new IPython Notebook daemon. :param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed. :param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook """
assert context
assert type(context) == dict
assert "context_hash" in context
assert type(context["context_hash"]) == int

http_port = self.pick_port()
assert http_port

context = context.copy()
context["http_port"] = http_port

# We can't proxy websocket URLs, so let them go directly through localhost,
# or let a front-end server (nginx) do the proxying.
if "websocket_url" not in context:
    context["websocket_url"] = "ws://localhost:{port}".format(port=http_port)

if "{port}" in context["websocket_url"]:
    # Do port substitution for the websocket URL
    context["websocket_url"] = context["websocket_url"].format(port=http_port)

pid = self.get_pid(name)
assert "terminated" not in context
comm.set_context(pid, context)

if fg:
    self.exec_notebook_daemon_command(name, "fg", port=http_port)
else:
    self.exec_notebook_daemon_command(name, "start", port=http_port)
<SYSTEM_TASK:> Start notebook if not yet running with these settings. <END_TASK> <USER_TASK:> Description: def start_notebook_on_demand(self, name, context): """Start notebook if not yet running with these settings. Return the updated settings with port info. :return: (context dict, created flag) """
if self.is_running(name):
    last_context = self.get_context(name)
    if not self.is_same_context(context, last_context):
        logger.info("Notebook context change detected for %s", name)
        self.stop_notebook(name)
        # Make sure we don't get a race condition over the context.json file
        time.sleep(2.0)
    else:
        return last_context, False

err_log = os.path.join(self.get_work_folder(name), "notebook.stderr.log")

logger.info("Launching new Notebook named %s, context is %s", name, context)
logger.info("Notebook log is %s", err_log)

self.start_notebook(name, context)
time.sleep(1)

context = self.get_context(name)
if "notebook_name" not in context:
    # Failed to launch within timeout
    raise RuntimeError("Failed to launch IPython Notebook, see {}".format(err_log))

return context, True
<SYSTEM_TASK:> Wraps an app factory to provide a fallback in case of import errors. <END_TASK> <USER_TASK:> Description: def failsafe(func): """ Wraps an app factory to provide a fallback in case of import errors. Takes a factory function to generate a Flask app. If there is an error creating the app, it will return a dummy app that just returns the Flask error page for the exception. This works with the Flask code reloader so that if the app fails during initialization it will still monitor those files for changes and reload the app. """
@functools.wraps(func)
def wrapper(*args, **kwargs):
    extra_files = []
    try:
        return func(*args, **kwargs)
    except:
        exc_type, exc_val, exc_tb = sys.exc_info()
        traceback.print_exc()

        # collect every file in the traceback so the reloader watches them
        tb = exc_tb
        while tb:
            filename = tb.tb_frame.f_code.co_filename
            extra_files.append(filename)
            tb = tb.tb_next

        if isinstance(exc_val, SyntaxError):
            extra_files.append(exc_val.filename)

        app = _FailSafeFlask(extra_files)
        app.debug = True

        @app.route('/')
        @app.route('/<path:path>')
        def index(path='/'):
            reraise(exc_type, exc_val, exc_tb)

        return app

return wrapper
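For illustration, a hypothetical app factory wrapped with the decorator above; the module name is made up. If the import inside create_app raises, the wrapper serves the traceback page instead, and the reloader keeps watching the offending files:

from flask import Flask

@failsafe
def create_app():
    app = Flask(__name__)
    import myproject.views  # hypothetical module; may raise during development
    return app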
<SYSTEM_TASK:> Validate specified JSON object obj with specified schema. <END_TASK> <USER_TASK:> Description: def validate(cls, schema, obj): """ Validate specified JSON object obj with specified schema. :param schema: Schema to validate against :type schema: :class:`json_schema_validator.schema.Schema` :param obj: JSON object to validate :rtype: bool :returns: True on success :raises `json_schema_validator.errors.ValidationError`: if the object does not match schema. :raises `json_schema_validator.errors.SchemaError`: if the schema itself is wrong. """
if not isinstance(schema, Schema):
    raise ValueError(
        "schema value {0!r} is not a Schema"
        " object".format(schema))
self = cls()
self.validate_toplevel(schema, obj)
return True
<SYSTEM_TASK:> Report an error during validation. <END_TASK> <USER_TASK:> Description: def _report_error(self, legacy_message, new_message=None, schema_suffix=None): """ Report an error during validation. There are two error messages. The legacy message is used for backwards compatibility and usually contains the object (possibly very large) that failed to validate. The new message is much better as it contains just a short message on what went wrong. User code can inspect object_expr and schema_expr to see which part of the object failed to validate against which part of the schema. The schema_suffix, if provided, is appended to the schema_expr. This is quite handy to specify the bit that the validator looked at (such as the type or optional flag, etc). object_suffix serves the same purpose but is used for object expressions instead. """
object_expr = self._get_object_expression()
schema_expr = self._get_schema_expression()
if schema_suffix:
    schema_expr += schema_suffix
raise ValidationError(legacy_message, new_message,
                      object_expr, schema_expr)
<SYSTEM_TASK:> Construct a sub-schema from a property of the current schema. <END_TASK> <USER_TASK:> Description: def _push_property_schema(self, prop): """Construct a sub-schema from a property of the current schema."""
schema = Schema(self._schema.properties[prop])
self._push_schema(schema, ".properties." + prop)
<SYSTEM_TASK:> When the daemon is started write out the information which port it was using. <END_TASK> <USER_TASK:> Description: def get_context_file_name(pid_file): """When the daemon is started write out the information which port it was using."""
root = os.path.dirname(pid_file)
port_file = os.path.join(root, "context.json")
return port_file
<SYSTEM_TASK:> Get context of running notebook. <END_TASK> <USER_TASK:> Description: def get_context(pid_file, daemon=False): """Get context of running notebook. A context file is created when the notebook starts. :param daemon: Are we trying to fetch the context inside the daemon? Otherwise do the death check. :return: dict, or None if the process is dead/not launched """
port_file = get_context_file_name(pid_file)

if not os.path.exists(port_file):
    return None

with open(port_file, "rt") as f:
    json_data = f.read()

try:
    data = json.loads(json_data)
except ValueError:
    logger.error("Damaged context json data %s", json_data)
    return None

if not daemon:
    pid = data.get("pid")
    if pid and not check_pid(int(pid)):
        # The Notebook daemon has exited uncleanly:
        # the PID does not point to any valid process
        return None

return data
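get_context defers the death check to a check_pid helper that is not shown here. A minimal sketch, assuming the usual POSIX idiom of sending signal 0 to probe for liveness:

import os

def check_pid(pid):
    """Return True if a process with the given PID is alive (POSIX only)."""
    try:
        os.kill(pid, 0)  # signal 0 delivers nothing, but errors if the PID is gone
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # the process exists but belongs to another user
    return True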
<SYSTEM_TASK:> Called at exit. Delete the context file to signal there is no active notebook. <END_TASK> <USER_TASK:> Description: def clear_context(pid_file): """Called at exit. Delete the context file to signal there is no active notebook. We don't delete the whole file, but leave it around for debugging purposes. Maybe later we want to pass some information back to the web site. """
# Short-circuited on purpose: the context file is left around for debugging,
# so everything below this return is intentionally dead code.
return

raise RuntimeError("Should not happen")

fname = get_context_file_name(pid_file)
shutil.move(fname, fname.replace("context.json", "context.old.json"))

data = {}
data["terminated"] = str(datetime.datetime.now(datetime.timezone.utc))
set_context(pid_file, data)
<SYSTEM_TASK:> Run the CPU no faster than the given speed limit <END_TASK> <USER_TASK:> Description: def delayed_burst_run(self, target_cycles_per_sec): """ Run the CPU no faster than the given speed limit """
old_cycles = self.cycles
start_time = time.time()

self.burst_run()

is_duration = time.time() - start_time
new_cycles = self.cycles - old_cycles
try:
    is_cycles_per_sec = new_cycles / is_duration
except ZeroDivisionError:
    pass
else:
    should_burst_duration = is_cycles_per_sec / target_cycles_per_sec
    target_duration = should_burst_duration * is_duration
    delay = target_duration - is_duration
    if delay > 0:
        if delay > self.max_delay:
            self.delay = self.max_delay
        else:
            self.delay = delay
        time.sleep(self.delay)

self.call_sync_callbacks()
<SYSTEM_TASK:> This does a model count so the side bar looks nice. <END_TASK> <USER_TASK:> Description: def get_model_counts(tagged_models, tag): """ This does a model count so the side bar looks nice. """
model_counts = []
for model in tagged_models:
    model['count'] = model['query'](tag).count()
    if model['count']:
        model_counts.append(model)
return model_counts
<SYSTEM_TASK:> given the buffer start and end indices of a range, compute the border edges <END_TASK> <USER_TASK:> Description: def compute_region_border(start, end): """
    given the buffer start and end indices of a range, compute the border edges
    that should be drawn to enclose the range.

    this function currently assumes 0x10-length rows.

    the result is a dictionary from buffer index to Cell instance.
    the Cell instance has boolean properties "top", "bottom", "left", and "right"
    that describe if a border should be drawn on that side of the cell view.

    :rtype: Mapping[int, CellT]
    """
cells = defaultdict(Cell)

start_row = row_number(start)
end_row = row_number(end)
if end % 0x10 == 0:
    end_row -= 1

## topmost cells
if start_row == end_row:
    for i in range(start, end):
        cells[i].top = True
else:
    for i in range(start, row_end_index(start) + 1):
        cells[i].top = True

# cells on second row, top left
if start_row != end_row:
    next_row_start = row_start_index(start) + 0x10
    for i in range(next_row_start, next_row_start + column_number(start)):
        cells[i].top = True

## bottommost cells
if start_row == end_row:
    for i in range(start, end):
        cells[i].bottom = True
else:
    for i in range(row_start_index(end), end):
        cells[i].bottom = True

# cells on second-to-last row, bottom right
if start_row != end_row:
    prev_row_end = row_end_index(end) - 0x10
    for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1):
        cells[i].bottom = True

## leftmost cells
if start_row == end_row:
    cells[start].left = True
else:
    second_row_start = row_start_index(start) + 0x10
    for i in range(second_row_start, row_start_index(end) + 0x10, 0x10):
        cells[i].left = True

# cells in first row, top left
if start_row != end_row:
    cells[start].left = True

## rightmost cells
if start_row == end_row:
    cells[end - 1].right = True
else:
    penultimate_row_end = row_end_index(end) - 0x10
    for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10):
        cells[i].right = True

# cells in last row, bottom right
if start_row != end_row:
    cells[end - 1].right = True

# convert back to a standard dict
# trick from: http://stackoverflow.com/a/20428703/87207
cells.default_factory = None
return cells
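compute_region_border assumes 0x10-byte rows and four index helpers that are not included in this snippet. A plausible minimal sketch, consistent with how they are used above:

def row_number(index):
    """Row containing the given buffer index (0x10 bytes per row)."""
    return index // 0x10

def column_number(index):
    """Column of the given buffer index within its row."""
    return index % 0x10

def row_start_index(index):
    """Buffer index of the first byte of the row containing index."""
    return index - (index % 0x10)

def row_end_index(index):
    """Buffer index of the last byte of the row containing index."""
    return index - (index % 0x10) + 0xf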
<SYSTEM_TASK:> Convert SWF slides into a PNG image <END_TASK> <USER_TASK:> Description: def swf2png(swf_path, png_path, swfrender_path="swfrender"): """Convert SWF slides into a PNG image Raises: OSError is raised if swfrender is not available. ConversionError is raised if image cannot be created. """
# Currently relies on swftools.
#
# It would be great to have a native Python dependency to convert SWF into
# PNG or JPG. However, pyswf isn't flawless: some graphical elements
# (like the text!) are lost during the export.
try:
    cmd = [swfrender_path, swf_path, '-o', png_path]
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
    raise ConversionError("Failed to convert SWF file %s.\n"
                          "\tCommand: %s\n"
                          "\tExit status: %s.\n"
                          "\tOutput:\n%s"
                          % (swf_path, " ".join(cmd), e.returncode, e.output))
<SYSTEM_TASK:> Create the presentation. <END_TASK> <USER_TASK:> Description: def create_presentation(self): """ Create the presentation. The audio track is mixed with the slides. The resulting file is saved as self.output DownloadError is raised if some resources cannot be fetched. ConversionError is raised if the final video cannot be created. """
# Avoid wasting time and bandwidth if we know that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
    raise ConversionError("File %s already exists and --overwrite not specified" % self.output)

video = self.download_video()

raw_slides = self.download_slides()

# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)

# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)

return self._assemble(video, frame_pattern)
<SYSTEM_TASK:> Download all SWF slides. <END_TASK> <USER_TASK:> Description: def download_slides(self): """ Download all SWF slides. The locations of the slide files are returned. A DownloadError is raised if at least one of the slides cannot be downloaded. """
return self.presentation.client.download_all(self.presentation.metadata['slides'],
                                             self.tmp_dir)
<SYSTEM_TASK:> Fetch the resource specified and return its content. <END_TASK> <USER_TASK:> Description: def fetch_no_cache(self, url): """ Fetch the resource specified and return its content. DownloadError is raised if the resource cannot be fetched. """
try:
    with contextlib.closing(self.opener.open(url)) as response:
        # InfoQ does not send a 404 but a 302 redirecting to a valid URL...
        if response.code != 200 or response.url == INFOQ_404_URL:
            raise DownloadError("%s not found" % url)
        return response.read()
except urllib.error.URLError as e:
    raise DownloadError("Failed to get %s: %s" % (url, e))
<SYSTEM_TASK:> Download the resources specified by url into dir_path. The resulting <END_TASK> <USER_TASK:> Description: def download(self, url, dir_path, filename=None): """ Download the resources specified by url into dir_path. The resulting file path is returned. DownloadError is raised the resources cannot be downloaded. """
if not filename:
    filename = url.rsplit('/', 1)[1]
path = os.path.join(dir_path, filename)

content = self.fetch(url)
with open(path, "wb") as f:
    f.write(content)

return path
<SYSTEM_TASK:> Download all the resources specified by urls into dir_path. The resulting <END_TASK> <USER_TASK:> Description: def download_all(self, urls, dir_path): """ Download all the resources specified by urls into dir_path. The resulting file paths are returned. DownloadError is raised if at least one of the resources cannot be downloaded. In that case, already downloaded resources are erased. """
# TODO: Implement parallel download
filenames = []
try:
    for url in urls:
        filenames.append(self.download(url, dir_path))
except DownloadError:
    # clean up partial downloads before propagating the error
    for filename in filenames:
        os.remove(filename)
    raise

return filenames
<SYSTEM_TASK:> push a byte onto the stack <END_TASK> <USER_TASK:> Description: def push_byte(self, stack_pointer, byte): """ push a byte onto the stack """
# FIXME: self.system_stack_pointer -= 1
stack_pointer.decrement(1)
addr = stack_pointer.value

# log.info("%x|\tpush $%x to %s stack at $%x\t|%s",
#     self.last_op_address, byte, stack_pointer.name, addr,
#     self.cfg.mem_info.get_shortest(self.last_op_address))
self.memory.write_byte(addr, byte)
<SYSTEM_TASK:> pull a byte from the stack <END_TASK> <USER_TASK:> Description: def pull_byte(self, stack_pointer): """ pull a byte from the stack """
addr = stack_pointer.value
byte = self.memory.read_byte(addr)

# log.info("%x|\tpull $%x from %s stack at $%x\t|%s",
#     self.last_op_address, byte, stack_pointer.name, addr,
#     self.cfg.mem_info.get_shortest(self.last_op_address))

# FIXME: self.system_stack_pointer += 1
stack_pointer.increment(1)

return byte
<SYSTEM_TASK:> See if we have notebook already running this context and if not then launch new one. <END_TASK> <USER_TASK:> Description: def launch_on_demand(request, username, notebook_context): """See if we have notebook already running this context and if not then launch new one."""
security_check(request, username)

settings = request.registry.settings

notebook_folder = settings.get("pyramid_notebook.notebook_folder", None)
if not notebook_folder:
    raise RuntimeError("Setting missing: pyramid_notebook.notebook_folder")

kill_timeout = settings.get("pyramid_notebook.kill_timeout", None)
if not kill_timeout:
    raise RuntimeError("Setting missing: pyramid_notebook.kill_timeout")
kill_timeout = int(kill_timeout)

if not notebook_context:
    notebook_context = {}

# Override notebook Jinja templates
if "extra_template_paths" not in notebook_context:
    notebook_context["extra_template_paths"] = [os.path.join(os.path.dirname(__file__), "server", "templates")]

# Furious invalid state follows if we let this slip through
assert type(notebook_context["extra_template_paths"]) == list, \
    "Got bad extra_template_paths {}".format(notebook_context["extra_template_paths"])

prepare_notebook_context(request, notebook_context)

# Configure websockets
# websocket_url = settings.get("pyramid_notebook.websocket_url")
# assert websocket_url, "pyramid_notebook.websocket_url setting missing"
# assert websocket_url.startswith("ws:/") or websocket_url.startswith("wss:/")

if request.registry.settings.get("pyramid_notebook.websocket_proxy", ""):
    websocket_url = route_to_alt_domain(request, request.host_url)
    websocket_url = websocket_url.replace("http://", "ws://").replace("https://", "wss://")
    notebook_context["websocket_url"] = websocket_url
else:
    # Connect websockets directly to the localhost notebook server,
    # do not try to proxy them
    websocket_url = "ws://localhost:{port}/notebook/"

# Record the hash of the current parameters, so we know if this user
# accesses the notebook in this or a different context
if "context_hash" not in notebook_context:
    notebook_context["context_hash"] = make_dict_hash(notebook_context)

manager = NotebookManager(notebook_folder, kill_timeout=kill_timeout)
notebook_info, created = manager.start_notebook_on_demand(username, notebook_context)

return notebook_info
<SYSTEM_TASK:> Stop any running notebook for a user. <END_TASK> <USER_TASK:> Description: def shutdown_notebook(request, username): """Stop any running notebook for a user."""
manager = get_notebook_manager(request)
if manager.is_running(username):
    manager.stop_notebook(username)
<SYSTEM_TASK:> Route URL to a different subdomain. <END_TASK> <USER_TASK:> Description: def route_to_alt_domain(request, url): """Route URL to a different subdomain. Used to rewrite URLs to point to websocket serving domain. """
# Do we need to route the IPython Notebook request to a different location?
alternative_domain = request.registry.settings.get("pyramid_notebook.alternative_domain", "").strip()
if alternative_domain:
    url = url.replace(request.host_url, alternative_domain)

return url
<SYSTEM_TASK:> add the given buffer indices to the given QItemSelection, both byte and char panes <END_TASK> <USER_TASK:> Description: def _bselect(self, selection, start_bindex, end_bindex): """ add the given buffer indices to the given QItemSelection, both byte and char panes """
selection.select(self._model.index2qindexb(start_bindex),
                 self._model.index2qindexb(end_bindex))
selection.select(self._model.index2qindexc(start_bindex),
                 self._model.index2qindexc(end_bindex))
<SYSTEM_TASK:> select the given range by qmodel indices <END_TASK> <USER_TASK:> Description: def _update_selection(self, qindex1, qindex2): """ select the given range by qmodel indices """
m = self.model()
self._do_select(m.qindex2index(qindex1), m.qindex2index(qindex2))
<SYSTEM_TASK:> override this method to customize the context menu <END_TASK> <USER_TASK:> Description: def get_context_menu(self, qpoint): """ override this method to customize the context menu """
menu = QMenu(self)
index = self.view.indexAt(qpoint)

def add_action(menu, text, handler, icon=None):
    a = None
    if icon is None:
        a = QAction(text, self)
    else:
        a = QAction(icon, text, self)
    a.triggered.connect(handler)
    menu.addAction(a)

add_action(menu, "Color selection", self._handle_color_selection)

# duplication here with vstructui
color_menu = menu.addMenu("Color selection...")

# need to escape the closure capture on the color loop variable below
# hint from: http://stackoverflow.com/a/6035865/87207
def make_color_selection_handler(color):
    return lambda: self._handle_color_selection(color=color)

for color in QT_COLORS:
    add_action(color_menu, "{:s}".format(color.name),
               make_color_selection_handler(color.qcolor),
               make_color_icon(color.qcolor))

start = self._hsm.start
end = self._hsm.end
cm = self.getColorModel()
if (start == end and cm.is_index_colored(start)) or cm.is_region_colored(start, end):
    def make_remove_color_handler(r):
        return lambda: self._handle_remove_color_range(r)

    remove_color_menu = menu.addMenu("Remove color...")
    for cr in cm.get_region_colors(start, end):
        pixmap = QPixmap(10, 10)
        pixmap.fill(cr.color)
        icon = QIcon(pixmap)
        add_action(remove_color_menu,
                   "Remove color [{:s}, {:s}], len: {:s}".format(
                       h(cr.begin), h(cr.end), h(cr.end - cr.begin)),
                   make_remove_color_handler(cr),
                   make_color_icon(cr.color))

menu.addSeparator()
# -----------------------------------------------------------------
add_action(menu, "Copy selection (binary)", self._handle_copy_binary)

copy_menu = menu.addMenu("Copy...")
add_action(copy_menu, "Copy selection (binary)", self._handle_copy_binary)
add_action(copy_menu, "Copy selection (text)", self._handle_copy_text)
add_action(copy_menu, "Copy selection (hex)", self._handle_copy_hex)
add_action(copy_menu, "Copy selection (hexdump)", self._handle_copy_hexdump)
add_action(copy_menu, "Copy selection (base64)", self._handle_copy_base64)

menu.addSeparator()
# -----------------------------------------------------------------
add_action(menu, "Add origin", lambda: self._handle_add_origin(index))

return menu
<SYSTEM_TASK:> Call every sync callback with CPU cycles trigger <END_TASK> <USER_TASK:> Description: def call_sync_callbacks(self): """ Call every sync callback with CPU cycles trigger """
current_cycles = self.cycles
for callback_cycles, callback in self.sync_callbacks:
    # get the CPU cycle count of the last call
    last_call_cycles = self.sync_callbacks_cyles[callback]

    if current_cycles - last_call_cycles > callback_cycles:
        # this callback should be called

        # Save the current cycles, to trigger the next call
        self.sync_callbacks_cyles[callback] = self.cycles

        # Call the callback function
        callback(current_cycles - last_call_cycles)
<SYSTEM_TASK:> call op from page 2 or 3 <END_TASK> <USER_TASK:> Description: def instruction_PAGE(self, opcode): """ call op from page 2 or 3 """
op_address, opcode2 = self.read_pc_byte()
paged_opcode = opcode * 256 + opcode2
# log.debug("$%x *** call paged opcode $%x" % (self.program_counter, paged_opcode))
self.call_instruction_func(op_address - 1, paged_opcode)
<SYSTEM_TASK:> Adds the 16-bit memory value into the 16-bit accumulator <END_TASK> <USER_TASK:> Description: def instruction_ADD16(self, opcode, m, register): """ Adds the 16-bit memory value into the 16-bit accumulator source code forms: ADDD P CC bits "HNZVC": -aaaa """
assert register.WIDTH == 16
old = register.value
r = old + m
register.set(r)
# log.debug("$%x %02x %02x ADD16 %s: $%02x + $%02x = $%02x" % (
#     self.program_counter, opcode, m, register.name, old, m, r))
self.clear_NZVC()
self.update_NZVC_16(old, m, r)
<SYSTEM_TASK:> Adds the memory byte into an 8-bit accumulator. <END_TASK> <USER_TASK:> Description: def instruction_ADD8(self, opcode, m, register): """ Adds the memory byte into an 8-bit accumulator. source code forms: ADDA P; ADDB P CC bits "HNZVC": aaaaa """
assert register.WIDTH == 8
old = register.value
r = old + m
register.set(r)
# log.debug("$%x %02x %02x ADD8 %s: $%02x + $%02x = $%02x" % (
#     self.program_counter, opcode, m, register.name, old, m, r))
self.clear_HNZVC()
self.update_HNZVC_8(old, m, r)
<SYSTEM_TASK:> Subtract one from the register. The carry bit is not affected, thus <END_TASK> <USER_TASK:> Description: def DEC(self, a): """ Subtract one from the register. The carry bit is not affected, thus allowing this instruction to be used as a loop counter in multiple- precision computations. When operating on unsigned values, only BEQ and BNE branches can be expected to behave consistently. When operating on twos complement values, all signed branches are available. source code forms: DEC Q; DECA; DECB CC bits "HNZVC": -aaa- """
r = a - 1
self.clear_NZV()
self.update_NZ_8(r)
if r == 0x7f:
    self.V = 1
return r
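A quick sanity check of the overflow rule encoded above: the only byte whose decrement lands on 0x7f is 0x80 (that is, -128 in two's complement), so V flags exactly the signed wrap-around. A standalone sketch of the same flag logic:

def dec8(a):
    """Pure-function version of the DEC flag logic above (8-bit)."""
    r = (a - 1) & 0xff
    n = (r & 0x80) != 0   # negative
    z = (r == 0)          # zero
    v = (r == 0x7f)       # signed overflow: only decrementing 0x80 lands here
    return r, n, z, v

assert dec8(0x80) == (0x7f, False, False, True)   # -128 - 1 wraps to +127
assert dec8(0x00) == (0xff, True, False, False)   # 0 - 1 wraps; carry untouched by DEC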
<SYSTEM_TASK:> Decrement memory location <END_TASK> <USER_TASK:> Description: def instruction_DEC_memory(self, opcode, ea, m): """ Decrement memory location """
r = self.DEC(m)
# log.debug("$%x DEC memory value $%x -1 = $%x and write it to $%x \t| %s" % (
#     self.program_counter, m, r, ea, self.cfg.mem_info.get_shortest(ea)))
return ea, r & 0xff
<SYSTEM_TASK:> This instruction transforms a twos complement 8-bit value in accumulator <END_TASK> <USER_TASK:> Description: def instruction_SEX(self, opcode): """
    This instruction transforms a twos complement 8-bit value in accumulator
    B into a twos complement 16-bit value in the D accumulator.

    source code forms: SEX

    CC bits "HNZVC": -aa0-

        // 0x1d SEX inherent
        case 0x1d:
            WREG_A = (RREG_B & 0x80) ? 0xff : 0;
            CLR_NZ;
            SET_NZ16(REG_D);
            peek_byte(cpu, REG_PC);

        #define SIGNED(b) ((Word)(b&0x80?b|0xff00:b))
        case 0x1D: /* SEX */
            tw = SIGNED(ibreg);
            SETNZ16(tw)
            SETDREG(tw)
            break;
    """
b = self.accu_b.value
if b & 0x80 == 0:
    self.accu_a.set(0x00)
else:
    # sign bit of B is set -> extend with $ff, per the reference
    # implementation in the docstring: WREG_A = (RREG_B & 0x80) ? 0xff : 0
    self.accu_a.set(0xff)

d = self.accu_d.value
# log.debug("SEX: b=$%x ; $%x&0x80=$%x ; d=$%x", b, b, (b & 0x80), d)
self.clear_NZ()
self.update_NZ_16(d)
<SYSTEM_TASK:> Show the received generic object nicely. <END_TASK> <USER_TASK:> Description: def _str(obj): """Show the received generic object nicely."""
values = []
for name in obj._attribs:
    val = getattr(obj, name)
    if isinstance(val, str):
        val = repr(val)
    val = str(val) if len(str(val)) < 10 else "(...)"
    values.append((name, val))
values = ", ".join("{}={}".format(k, v) for k, v in values)
return "{}({})".format(obj.__class__.__name__, values)
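For illustration, a hypothetical class with an _attribs tuple shows how long values get elided (the class and attribute names here are made up):

class Demo:
    _attribs = ('x', 'label')

    def __init__(self):
        self.x = 7
        self.label = "a fairly long string value"

print(_str(Demo()))  # -> Demo(x=7, label=(...))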
<SYSTEM_TASK:> Show the received object as precisely as possible. <END_TASK> <USER_TASK:> Description: def _repr(obj): """Show the received object as precisely as possible."""
vals = ", ".join("{}={!r}".format(name, getattr(obj, name)) for name in obj._attribs)
if vals:
    t = "{}(name={}, {})".format(obj.__class__.__name__, obj.name, vals)
else:
    t = "{}(name={})".format(obj.__class__.__name__, obj.name)
return t
<SYSTEM_TASK:> Create a generic object for the tags. <END_TASK> <USER_TASK:> Description: def _make_object(name): """Create a generic object for the tags."""
klass = type(name, (SWFObject,),
             {'__str__': _str, '__repr__': _repr, 'name': name})
return klass()
<SYSTEM_TASK:> Parse a SWF. <END_TASK> <USER_TASK:> Description: def parsefile(filename, read_twips=True): """Parse a SWF. If you have a file object already, just use SWFParser directly. read_twips: True - return values as read from the SWF False - return values in pixels (at 100% zoom) """
with open(filename, 'rb') as fh:
    return SWFParser(fh, read_twips)
<SYSTEM_TASK:> A generic parser for several PlaceObjectX. <END_TASK> <USER_TASK:> Description: def _generic_placeobject_parser(self, obj, version): """A generic parser for several PlaceObjectX."""
bc = BitConsumer(self._src)

obj.PlaceFlagHasClipActions = bc.u_get(1)
obj.PlaceFlagHasClipDepth = bc.u_get(1)
obj.PlaceFlagHasName = bc.u_get(1)
obj.PlaceFlagHasRatio = bc.u_get(1)
obj.PlaceFlagHasColorTransform = bc.u_get(1)
obj.PlaceFlagHasMatrix = bc.u_get(1)
obj.PlaceFlagHasCharacter = bc.u_get(1)
obj.PlaceFlagMove = bc.u_get(1)

if version == 3:
    obj.Reserved = bc.u_get(1)
    obj.PlaceFlagOpaqueBackground = bc.u_get(1)
    obj.PlaceFlagHasVisible = bc.u_get(1)
    obj.PlaceFlagHasImage = bc.u_get(1)
    obj.PlaceFlagHasClassName = bc.u_get(1)
    obj.PlaceFlagHasCacheAsBitmap = bc.u_get(1)
    obj.PlaceFlagHasBlendMode = bc.u_get(1)
    obj.PlaceFlagHasFilterList = bc.u_get(1)

obj.Depth = unpack_ui16(self._src)

if version == 3:
    if obj.PlaceFlagHasClassName or (
            obj.PlaceFlagHasImage and obj.PlaceFlagHasCharacter):
        obj.ClassName = self._get_struct_string()

if obj.PlaceFlagHasCharacter:
    obj.CharacterId = unpack_ui16(self._src)
if obj.PlaceFlagHasMatrix:
    obj.Matrix = self._get_struct_matrix()
if obj.PlaceFlagHasColorTransform:
    obj.ColorTransform = self._get_struct_cxformwithalpha()
if obj.PlaceFlagHasRatio:
    obj.Ratio = unpack_ui16(self._src)
if obj.PlaceFlagHasName:
    obj.Name = self._get_struct_string()
if obj.PlaceFlagHasClipDepth:
    obj.ClipDepth = unpack_ui16(self._src)

if version == 3:
    if obj.PlaceFlagHasFilterList:
        obj.SurfaceFilterList = self._get_struct_filterlist()
    if obj.PlaceFlagHasBlendMode:
        obj.BlendMode = unpack_ui8(self._src)
    if obj.PlaceFlagHasCacheAsBitmap:
        obj.BitmapCache = unpack_ui8(self._src)
    if obj.PlaceFlagHasVisible:
        obj.Visible = unpack_ui8(self._src)
        obj.BackgroundColor = self._get_struct_rgba()

if obj.PlaceFlagHasClipActions:
    obj.ClipActions = self._get_struct_clipactions()
<SYSTEM_TASK:> Calculate the coverage of a file. <END_TASK> <USER_TASK:> Description: def coverage(self): """Calculate the coverage of a file."""
items_unk = collections.Counter()
items_ok = collections.Counter()

def _go_deep(obj):
    """Recursive function to find internal attributes."""
    if type(obj).__name__ in ('UnknownObject', 'UnknownAction'):
        # blatantly unknown
        items_unk[obj.name] += 1
    elif obj.name in ('DefineMorphShape2', 'ClipActions'):
        # these are incomplete, see FIXMEs in the code above
        items_unk[obj.name] += 1
    else:
        # fully parsed
        items_ok[obj.name] += 1

    for name in obj._attribs:
        attr = getattr(obj, name)
        if isinstance(attr, SWFObject):
            _go_deep(attr)

for tag in self.tags:
    _go_deep(tag)

full_count = sum(items_ok.values()) + sum(items_unk.values())
coverage = 100 * sum(items_ok.values()) / full_count
print("Coverage is {:.1f}% of {} total items".format(coverage, full_count))
print("Most common parsed objects:")
for k, v in items_ok.most_common(3):
    print("{:5d} {}".format(v, k))
if items_unk:
    print("Most common Unknown objects")
    for k, v in items_unk.most_common(3):
        print("{:5d} {}".format(v, k))
<SYSTEM_TASK:> Checkerboard mixer. <END_TASK> <USER_TASK:> Description: def checkerboard(img_spec1=None, img_spec2=None, patch_size=10, view_set=(0, 1, 2), num_slices=(10,), num_rows=2, rescale_method='global', background_threshold=0.05, annot=None, padding=5, output_path=None, figsize=None, ): """
    Checkerboard mixer.

    Parameters
    ----------
    img_spec1 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    img_spec2 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    patch_size : int or list or (int, int) or None
        size of checker patch (either square or rectangular)
        If None, the number of voxels/patch is chosen such that
        there will be 7 patches through the width/height.
    view_set : iterable
        Integers specifying the dimensions to be visualized.
        Choices: one or more of (0, 1, 2) for a 3D image
    num_slices : int or iterable of size as view_set
        number of slices to be selected for each view
        Must be of the same length as view_set, each element specifying
        the number of slices for each dimension.
        If only one number is given, the same number will be chosen for all dimensions.
    num_rows : int
        number of rows (top to bottom) per each of 3 dimensions
    rescale_method : bool or str or list or None
        Range to rescale the intensity values to
        Default: 'global', min and max values computed based on ranges from both images.
        If false or None, no rescaling is done (does not work yet).
    background_threshold : float or str
        A threshold value below which all the background voxels will be set to zero.
        Default: 0.05. The other option is a string specifying a percentile: '5%', '10%'.
        Specify None if you don't want any thresholding.
    annot : str
        Text to display to annotate the visualization
    padding : int
        number of voxels to pad around each panel.
    output_path : str
        path to save the generated collage to.
    figsize : list
        Size of figure in inches to be passed on to plt.figure()
        e.g. [12, 12] or [20, 20]

    Returns
    -------
    fig : figure handle
        handle to the collage figure generated.
    """
img_one, img_two = _preprocess_images(img_spec1, img_spec2,
                                      rescale_method=rescale_method,
                                      bkground_thresh=background_threshold,
                                      padding=padding)

display_params = dict(interpolation='none', aspect='auto', origin='lower',
                      cmap='gray', vmin=0.0, vmax=1.0)

mixer = partial(_checker_mixer, checker_size=patch_size)
collage = Collage(view_set=view_set, num_slices=num_slices, num_rows=num_rows,
                  figsize=figsize, display_params=display_params)
collage.transform_and_attach((img_one, img_two), func=mixer)
collage.save(output_path=output_path, annot=annot)

return collage
<SYSTEM_TASK:> Voxel-wise difference map. <END_TASK> <USER_TASK:> Description: def voxelwise_diff(img_spec1=None, img_spec2=None, abs_value=True, cmap='gray', overlay_image=False, overlay_alpha=0.8, num_rows=2, num_cols=6, rescale_method='global', background_threshold=0.05, annot=None, padding=5, output_path=None, figsize=None): """
    Voxel-wise difference map.

    Parameters
    ----------
    img_spec1 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    img_spec2 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    abs_value : bool
        Flag indicating whether to take the absolute value of the difference or not.
        Default: True, display absolute differences only (so order of images does not matter)
    cmap : str
        Colormap to show the difference values.
    overlay_image : bool
        Flag to specify whether to overlay the difference values on the original image.
        .. note: This feature is not reliable and well supported yet.
    num_rows : int
        number of rows (top to bottom) per each of 3 dimensions
    num_cols : int
        number of panels (left to right) per row of each dimension.
    rescale_method : bool or str or list or None
        Range to rescale the intensity values to
        Default: 'global', min and max values computed based on ranges from both images.
        If false or None, no rescaling is done (does not work yet).
    background_threshold : float or str
        A threshold value below which all the background voxels will be set to zero.
        Default: 0.05. The other option is a string specifying a percentile: '5%', '10%'.
        Specify None if you don't want any thresholding.
    annot : str
        Text to display to annotate the visualization
    padding : int
        number of voxels to pad around each panel.
    output_path : str
        path to save the generated collage to.
    figsize : list
        Size of figure in inches to be passed on to plt.figure()
        e.g. [12, 12] or [20, 20]

    Returns
    -------
    fig : figure handle
        handle to the collage figure generated.
    """
if not isinstance(abs_value, bool):
    abs_value = bool(abs_value)

mixer_params = dict(abs_value=abs_value, cmap=cmap,
                    overlay_image=overlay_image, overlay_alpha=overlay_alpha)
fig = _compare(img_spec1, img_spec2,
               num_rows=num_rows, num_cols=num_cols,
               mixer='voxelwise_diff', annot=annot, padding=padding,
               rescale_method=rescale_method,
               bkground_thresh=background_threshold,
               output_path=output_path, figsize=figsize,
               **mixer_params)

return fig
<SYSTEM_TASK:> Produces checkerboard comparison plot of two 3D images. <END_TASK> <USER_TASK:> Description: def _compare(img_spec1, img_spec2, num_rows=2, num_cols=6, mixer='checker_board', rescale_method='global', annot=None, padding=5, bkground_thresh=0.05, output_path=None, figsize=None, **kwargs): """
    Produces checkerboard comparison plot of two 3D images.

    Parameters
    ----------
    img_spec1 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    img_spec2 : str or nibabel image-like object
        MR image (or path to one) to be visualized
    num_rows : int
        number of rows (top to bottom) per each of 3 dimensions
    num_cols : int
        number of panels (left to right) per row of each dimension.
    mixer : str
        type of mixer to produce the comparison figure.
        Options: checker_board, color_mix, diff_abs
    rescale_method : bool or str or list or None
        Method to rescale the intensity values to.
        Choices: 'global', 'each', False or None.
        Default: 'global', min and max values computed based on ranges from both images.
        If 'each', rescales each image separately to [0, 1].
        This option is useful when overlaying images with very different intensity ranges,
        e.g. from different modalities altogether.
        If False or None, no rescaling is done (does not work yet).
    annot : str
        Text to display to annotate the visualization
    padding : int
        number of voxels to pad around each panel.
    output_path : str
        path to save the generated collage to.
    figsize : list
        Size of figure in inches to be passed on to plt.figure()
        e.g. [12, 12] or [20, 20]
    kwargs : dict
        Additional arguments specific to the particular mixer
        e.g. alpha_channels = [1, 1] for the color_mix mixer

    Returns
    -------
    """
num_rows, num_cols, padding = check_params(num_rows, num_cols, padding)

img1, img2 = check_images(img_spec1, img_spec2, bkground_thresh=bkground_thresh)
img1, img2 = crop_to_extents(img1, img2, padding)

num_slices_per_view = num_rows * num_cols
slices = pick_slices(img2, num_slices_per_view)

rescale_images, img1, img2, min_value, max_value = check_rescaling(img1, img2, rescale_method)

plt.style.use('dark_background')

num_axes = 3
if figsize is None:
    figsize = [3 * num_axes * num_rows, 3 * num_cols]
fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize)

# displaying some annotation text if provided
# a good choice would be the location of the input images
# (for future reference when the image is shared or misplaced!)
if annot is not None:
    fig.suptitle(annot, backgroundcolor='black', color='g')

display_params = dict(interpolation='none', aspect='equal', origin='lower')

ax = ax.flatten()
ax_counter = 0
for dim_index in range(3):
    for slice_num in slices[dim_index]:
        plt.sca(ax[ax_counter])
        ax_counter = ax_counter + 1

        slice1 = get_axis(img1, dim_index, slice_num)
        slice2 = get_axis(img2, dim_index, slice_num)

        mixed, mixer_spec_params = _generic_mixer(slice1, slice2, mixer, **kwargs)
        display_params.update(mixer_spec_params)

        plt.imshow(mixed, vmin=min_value, vmax=max_value, **display_params)

        # adjustments for proper presentation
        plt.axis('off')

fig.tight_layout()

if output_path is not None:
    output_path = output_path.replace(' ', '_')
    fig.savefig(output_path + '.png', bbox_inches='tight')

# plt.close()

return fig
<SYSTEM_TASK:> Generic mixer to process two slices with appropriate mixer <END_TASK> <USER_TASK:> Description: def _generic_mixer(slice1, slice2, mixer_name, **kwargs): """ Generic mixer to process two slices with appropriate mixer and return the composite to be displayed. """
mixer_name = mixer_name.lower()
if mixer_name in ['color_mix', 'rgb']:
    mixed = _mix_color(slice1, slice2, **kwargs)
    cmap = None  # data is already RGB-ed
elif mixer_name in ['checkerboard', 'checker', 'cb', 'checker_board']:
    checkers = _get_checkers(slice1.shape, **kwargs)
    mixed = _checker_mixer(slice1, slice2, checkers)
    cmap = 'gray'
elif mixer_name in ['diff', 'voxelwise_diff', 'vdiff']:
    mixed, cmap = _diff_image(slice1, slice2, **kwargs)
    # if kwargs['overlay_image'] is True:
    #     diff_cmap = diff_colormap()
    #     plt.imshow(slice1, alpha=kwargs['overlay_alpha'], **display_params)
    #     plt.hold(True)
    #     plt.imshow(mixed, cmap=diff_cmap,
    #                vmin=min_value, vmax=max_value, **display_params)
    # else:
    #     plt.imshow(mixed, cmap=cmap,
    #                vmin=min_value, vmax=max_value, **display_params)
else:
    raise ValueError('Invalid mixer name chosen.')

disp_params = dict(cmap=cmap)

return mixed, disp_params
<SYSTEM_TASK:> Estimates the intensity range to clip the visualizations to <END_TASK> <USER_TASK:> Description: def check_rescaling(img1, img2, rescale_method): """Estimates the intensity range to clip the visualizations to"""
# estimating intensity ranges
if rescale_method is None:
    # this section is to help the user avoid all intensity rescaling altogether!
    # TODO bug: does not work yet, as pyplot does not offer any easy way to control it
    rescale_images = False
    min_value = None
    max_value = None
    norm_image = None  # mpl.colors.NoNorm doesn't work yet; data is getting linearly normalized to [0, 1]
elif isinstance(rescale_method, str):
    if rescale_method.lower() in ['global']:
        # TODO need a way to alert the user if one of the distributions is too narrow,
        #   in which case that image will be collapsed to a uniform value
        combined_distr = np.concatenate((img1.flatten(), img2.flatten()))
        min_value = combined_distr.min()
        max_value = combined_distr.max()
    elif rescale_method.lower() in ['each']:
        img1 = scale_0to1(img1)
        img2 = scale_0to1(img2)
        min_value = 0.0
        max_value = 1.0
    else:
        raise ValueError('rescaling method can only be "global" or "each"')
    rescale_images = True
    norm_image = mpl.colors.Normalize
elif len(rescale_method) == 2:
    min_value = min(rescale_method)
    max_value = max(rescale_method)
    rescale_images = True
    norm_image = mpl.colors.Normalize
else:
    raise ValueError('Invalid intensity range! It must be either:\n'
                     '1) a list/tuple of two distinct values, or\n'
                     '2) "global", rescaling based on min/max values derived from both images, or\n'
                     '3) None, for no rescaling or norming altogether.')

return rescale_images, img1, img2, min_value, max_value
<SYSTEM_TASK:> Reads the two images and asserts identical shape. <END_TASK> <USER_TASK:> Description: def check_images(img_spec1, img_spec2, bkground_thresh=0.05): """Reads the two images and asserts identical shape."""
img1 = read_image(img_spec1, bkground_thresh)
img2 = read_image(img_spec2, bkground_thresh)

if img1.shape != img2.shape:
    raise ValueError('size mismatch! First image: {} Second image: {}\n'
                     'Two images to be compared must be of the same size in all dimensions.'.format(
                         img1.shape, img2.shape))

return img1, img2
<SYSTEM_TASK:> Creates checkerboard of a given tile size, filling a given slice. <END_TASK> <USER_TASK:> Description: def _get_checkers(slice_shape, patch_size): """Creates checkerboard of a given tile size, filling a given slice."""
if patch_size is not None:
    patch_size = check_patch_size(patch_size)
else:
    # 7 patches in each axis, min voxels/patch = 3
    # TODO make 7 a user-settable parameter
    patch_size = np.round(np.array(slice_shape) / 7).astype('int16')
    patch_size = np.maximum(patch_size, np.array([3, 3]))

black = np.zeros(patch_size)
white = np.ones(patch_size)
tile = np.vstack((np.hstack([black, white]),
                  np.hstack([white, black])))

# using ceil so we can clip the extra portions
num_tiles = np.ceil(np.divide(slice_shape, tile.shape)).astype(int)
checkers = np.tile(tile, num_tiles)

# clipping any extra columns or rows
if any(np.greater(checkers.shape, slice_shape)):
    if checkers.shape[0] > slice_shape[0]:
        checkers = np.delete(checkers, np.s_[slice_shape[0]:], axis=0)
    if checkers.shape[1] > slice_shape[1]:
        checkers = np.delete(checkers, np.s_[slice_shape[1]:], axis=1)

return checkers
<SYSTEM_TASK:> Mixing them as red and green channels <END_TASK> <USER_TASK:> Description: def _mix_color(slice1, slice2, alpha_channels, color_space): """Mixing them as red and green channels"""
if slice1.shape != slice2.shape:
    raise ValueError('size mismatch between the two slices!')

alpha_channels = np.array(alpha_channels)
if len(alpha_channels) != 2:
    raise ValueError('Alphas must be two-value tuples.')

slice1, slice2 = scale_images_0to1(slice1, slice2)

# masking background
combined_distr = np.concatenate((slice1.flatten(), slice2.flatten()))
image_eps = np.percentile(combined_distr, 5)
background = np.logical_or(slice1 <= image_eps, slice2 <= image_eps)

if color_space.lower() in ['rgb']:
    red = alpha_channels[0] * slice1
    grn = alpha_channels[1] * slice2
    blu = np.zeros_like(slice1)
    # foreground = np.logical_not(background)
    # blu[foreground] = 1.0
    mixed = np.stack((red, grn, blu), axis=2)
elif color_space.lower() in ['hsv']:
    raise NotImplementedError(
        'This method (color_space="hsv") is yet to be fully conceptualized and implemented.')
    # TODO other ideas: hue/saturation/intensity value driven by difference in intensity?
    hue = alpha_channels[0] * slice1
    sat = alpha_channels[1] * slice2
    val = np.ones_like(slice1)

    hue[background] = 1.0
    sat[background] = 0.0
    val[background] = 0.0

    mixed = np.stack((hue, sat, val), axis=2)
    # converting to RGB
    mixed = mpl.colors.hsv_to_rgb(mixed)

# ensuring all values are clipped to [0, 1]
mixed[mixed <= 0.0] = 0.0
mixed[mixed >= 1.0] = 1.0

return mixed
<SYSTEM_TASK:> Mixes the two slices in alternating areas specified by checkers <END_TASK> <USER_TASK:> Description: def _checker_mixer(slice1, slice2, checker_size=None): """Mixes the two slices in alternating areas specified by checkers"""
checkers = _get_checkers(slice1.shape, checker_size)
if slice1.shape != slice2.shape or slice2.shape != checkers.shape:
    raise ValueError('size mismatch between cropped slices and checkers!!!')

mixed = slice1.copy()
mixed[checkers > 0] = slice2[checkers > 0]

return mixed
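A quick end-to-end check of _get_checkers and _checker_mixer with plain NumPy arrays; the shapes are arbitrary, and this assumes check_patch_size accepts a (rows, cols) pair:

import numpy as np

slice1 = np.zeros((40, 40))
slice2 = np.ones((40, 40))

mixed = _checker_mixer(slice1, slice2, checker_size=(10, 10))

# alternating 10x10 patches: 0s from slice1, 1s from slice2
assert mixed.shape == (40, 40)
assert mixed[0, 0] == 0.0 and mixed[0, 15] == 1.0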
<SYSTEM_TASK:> Generate a filtered query from request parameters. <END_TASK> <USER_TASK:> Description: def filter(self): """Generate a filtered query from request parameters. :returns: Filtered SQLALchemy query """
argmap = {
    filter.label or label: filter.field
    for label, filter in self.filters.items()
}
args = self.opts.parser.parse(argmap)

query = self.query if self.query is not None else self.opts.query
for label, filter in self.filters.items():
    value = args.get(filter.label or label)
    if value is not None:
        query = filter.filter(query, self.opts.model, label, value)

return query
<SYSTEM_TASK:> Validator to ensure proper usage. <END_TASK> <USER_TASK:> Description: def _check_min_density(self, min_density): """Validator to ensure proper usage."""
if min_density is None:
    self._min_density = -np.Inf
elif isinstance(min_density, float) and (0.0 <= min_density < 1.0):
    self._min_density = min_density
else:
    raise ValueError('min_density must be float and be >= 0.0 and < 1.0')
<SYSTEM_TASK:> Checks if the density is too low. <END_TASK> <USER_TASK:> Description: def _not_empty(self, view, slice_): """Checks if the density is too low. """
img2d = self._get_axis(self._image, view, slice_)
return (np.count_nonzero(img2d) / img2d.size) > self._min_density
<SYSTEM_TASK:> Samples the slices in the given dimension according the chosen strategy. <END_TASK> <USER_TASK:> Description: def _sample_slices_in_dim(self, view, num_slices, non_empty_slices): """Samples the slices in the given dimension according the chosen strategy."""
if self._sampling_method == 'linear':
    return self._linear_selection(non_empty_slices=non_empty_slices,
                                  num_slices=num_slices)
elif self._sampling_method == 'percentage':
    return self._percent_selection(non_empty_slices=non_empty_slices)
elif self._sampling_method == 'callable':
    return self._selection_by_callable(view=view,
                                       non_empty_slices=non_empty_slices,
                                       num_slices=num_slices)
else:
    raise NotImplementedError('Invalid state for the class!')
<SYSTEM_TASK:> Selects linearly spaced slices among the non-empty slices in the given dimension <END_TASK> <USER_TASK:> Description: def _linear_selection(self, non_empty_slices, num_slices): """Selects linearly spaced slices among the non-empty slices in the given dimension."""
num_non_empty = len(non_empty_slices)

# # trying to skip 5% of the slices at the tails (bottom clipping at 0)
# skip_count = max(0, np.around(num_non_empty * 0.05).astype('int16'))
# # only when possible
# if skip_count > 0 and (num_non_empty - 2 * skip_count > num_slices):
#     non_empty_slices = non_empty_slices[skip_count: -skip_count]
#     num_non_empty = len(non_empty_slices)

sampled_indices = np.linspace(0, num_non_empty,
                              num=min(num_non_empty, num_slices),
                              endpoint=False)
slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]

return slices_in_dim
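To make the selection concrete: with 30 non-empty slices and num_slices=5, np.linspace with endpoint=False picks evenly spaced positions from the start, which are then mapped back to actual slice numbers:

import numpy as np

non_empty_slices = np.arange(100, 130)  # 30 non-empty slice numbers
idx = np.linspace(0, 30, num=5, endpoint=False)  # [ 0.  6. 12. 18. 24.]
picked = non_empty_slices[np.around(idx).astype('int64')]
print(picked)  # [100 106 112 118 124]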
<SYSTEM_TASK:> Chooses slices at a given percentage between the first and last non-empty slice. <END_TASK> <USER_TASK:> Description: def _percent_selection(self, non_empty_slices): """Chooses slices at a given percentage between the first and last non-empty slice."""
return np.around(self._sampler * len(non_empty_slices) / 100).astype('int64')
<SYSTEM_TASK:> Returns all the slices selected by the given callable. <END_TASK> <USER_TASK:> Description: def _selection_by_callable(self, view, num_slices, non_empty_slices): """Returns all the slices selected by the given callable."""
selected = [sl for sl in non_empty_slices
            if self._sampler(self._get_axis(self._image, view, sl))]
return selected[:num_slices]
<SYSTEM_TASK:> Generator over all the slices selected, each time returning a cross-section. <END_TASK> <USER_TASK:> Description: def get_slices(self, extended=False): """Generator over all the slices selected, each time returning a cross-section. Parameters ---------- extended : bool Flag to return just slice data (default, extended=False), or return a tuple of axis, slice_num, slice_data (extended=True) Returns ------- slice_data : an image (just slice data, default, with extended=False), or a tuple of axis, slice_num, slice_data (extended=True) """
for dim, slice_num in self._slices:
    yield self._get_axis(self._image, dim, slice_num, extended=extended)
<SYSTEM_TASK:> Returns the same cross-section from the multiple images supplied. <END_TASK> <USER_TASK:> Description: def get_slices_multi(self, image_list, extended=False): """Returns the same cross-section from the multiple images supplied.

    All images must be of the same shape as the original image defining this object.

    Parameters
    ----------
    image_list : Iterable
        containing at least 2 images
    extended : bool
        Flag to return just slice data (default, extended=False), or
        return a tuple of axis, slice_num, slice_data (extended=True)

    Returns
    -------
    tuple_slice_data : tuple of one slice from each image in the input image list
        Let's denote it by TSL.
        If extended=True, returns tuple(axis, slice_num, TSL)
    """
# ensure all the images have the same shape
for img in image_list:
    if img.shape != self._image.shape:
        raise ValueError('Supplied images are not compatible with this class. '
                         'They must have the shape: {}'.format(self._image_shape))

for dim, slice_num in self._slices:
    multiple_slices = (self._get_axis(img, dim, slice_num) for img in image_list)
    if not extended:
        # return just the slice data
        yield multiple_slices
    else:
        # additionally include which dim and which slice num;
        # not using the extended option of get_axis, to avoid complicating unpacking
        yield dim, slice_num, multiple_slices
<SYSTEM_TASK:> Creates a grid of axes bounded within a given rectangle. <END_TASK> <USER_TASK:> Description: def _make_grid_of_axes(self, bounding_rect=cfg.bounding_rect_default, num_rows=cfg.num_rows_per_view_default, num_cols=cfg.num_cols_grid_default, axis_pad=cfg.axis_pad_default, commn_annot=None, **axis_kwargs): """Creates a grid of axes bounded within a given rectangle."""
axes_in_grid = list() extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect, num_cols=num_cols, num_rows=num_rows, axis_pad=axis_pad) for cell_ext in extents: ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False, **axis_kwargs) if commn_annot is not None: ax_cell.set_title(commn_annot) ax_cell.set_axis_off() axes_in_grid.append(ax_cell) return axes_in_grid
<SYSTEM_TASK:> Creates blank imshow objects in each axis <END_TASK> <USER_TASK:> Description: def _create_imshow_objects(self): """Creates an imshow object in each axis, to be updated later via set_data()."""
# uniform values for initial image can cause weird behaviour with normalization # as imshow.set_data() does not automatically update the normalization!! # using random data is a better choice random_image = np.random.rand(20, 20) self.images = [None] * len(self.flat_grid) for ix, ax in enumerate(self.flat_grid): self.images[ix] = ax.imshow(random_image, **self.display_params)
<SYSTEM_TASK:> Attaches the relevant cross-sections to each axis. <END_TASK> <USER_TASK:> Description: def attach(self, image_in, sampler=None, show=True): """Attaches the relevant cross-sections to each axis.

Parameters
----------
image_in : ndarray
    The image to be attached to the collage, once it is created.
    Must be at least 3D.

sampler : str or list or callable
    Selection strategy: identifies the type of sampling done to select
    the slices to return. All sampling is done between the first and
    last non-empty slice in that view/dimension.

    - if 'linear' : linearly spaced slices
    - if list, it is treated as a set of percentages at which slices
      are to be sampled (must be in the range [1-100], not [0-1]).
      This could be used to sample more/all slices in the middle
      e.g. range(40, 60, 5), or at the ends e.g. [5, 10, 15, 85, 90, 95]
    - if callable, it must take a 2D image of arbitrary size and return
      True/False to indicate whether to select that slice or not.
      Only non-empty slices (at least one non-zero voxel) are provided
      as input. Simple examples for the callable could be based on
      1) percentage of non-zero voxels > x, 2) presence of a desired
      texture, or 3) certain properties of the intensity distribution
      (skew: dark/bright, energy, etc.).
      If the sampler returns more than the requested `num_slices`,
      only the first num_slices will be selected.

show : bool
    Flag to request immediate display of collage

"""
if len(image_in.shape) < 3:
    raise ValueError('Image must be at least 3D')

# allowing the choice of new sampling for different invocations.
if sampler is None:
    temp_sampler = self.sampler
else:
    temp_sampler = sampler

slicer = SlicePicker(image_in=image_in,
                     view_set=self.view_set,
                     num_slices=self.num_slices,
                     sampler=temp_sampler)

try:
    for img_obj, slice_data in zip(self.images, slicer.get_slices()):
        img_obj.set_data(slice_data)
except Exception:
    self._data_attached = False
    raise ValueError('unable to attach the given image data to current collage')
else:
    self._data_attached = True

# show all the axes
if show:
    self.show()
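A hedged usage sketch: the Collage class name and constructor arguments below are assumptions for illustration; only attach() and save() are shown above.

collage = Collage(view_set=(0, 1, 2), num_slices=9)   # hypothetical constructor
collage.attach(my_3d_image, sampler='linear')          # my_3d_image: any 3D ndarray
collage.save(annot='subject 001', output_path='/tmp/collage_001')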
<SYSTEM_TASK:> Sets the visibility property of all axes. <END_TASK> <USER_TASK:> Description: def _set_visible(self, visibility, grid_index=None): """Sets the visibility property of all axes."""
if grid_index is None: for ax in self.flat_grid: ax.set_visible(visibility) else: if grid_index < 0 or grid_index >= len(self.grids): raise IndexError('Valid indices : 0 to {}'.format(len(self.grids) - 1)) for ax in self.grids[grid_index]: ax.set_visible(visibility)
<SYSTEM_TASK:> Saves the collage to disk as an image. <END_TASK> <USER_TASK:> Description: def save(self, annot=None, output_path=None): """Saves the collage to disk as an image.

Parameters
-----------
annot : str
    text to annotate the figure with a super title

output_path : str
    path to save the figure to.
    Note: any spaces in the filename will be replaced with ``_``

"""
if annot is not None: self.fig.suptitle(annot, backgroundcolor='black', color='g') if output_path is not None: output_path = output_path.replace(' ', '_') # TODO improve bbox calculations to include ONLY the axes from collage # and nothing else self.fig.savefig(output_path + '.png', bbox_inches='tight', dpi=200, bbox_extra_artists=self.flat_grid)
<SYSTEM_TASK:> Clears all the axes to start fresh. <END_TASK> <USER_TASK:> Description: def clear(self): """Clears all the axes to start fresh."""
for ax in self.flat_grid: for im_h in ax.findobj(AxesImage): im_h.remove()
<SYSTEM_TASK:> Makes note of which dimension needs to be fixed, defaulting to last. <END_TASK> <USER_TASK:> Description: def _add_fixed_dim(self, fixed_dim=-1): """Makes note of which dimension needs to be fixed, defaulting to last."""
if fixed_dim in [-1, None, 'last']:
    fixed_dim = len(self.input_image.shape) - 1  # last dimension

if int(fixed_dim) != fixed_dim or \
        fixed_dim >= len(self.input_image.shape) or \
        fixed_dim < 0:
    raise ValueError('invalid value for the dimension to be fixed! '
                     'Must be an integer in range [0, {}] inclusive'
                     ''.format(len(self.input_image.shape) - 1))

if self.input_image.shape[fixed_dim] < 2:
    raise ValueError('Input image must have at least two samples '
                     'in the fixed dimension. It has {}. '
                     'Full image shape: {} '
                     ''.format(self.input_image.shape[fixed_dim],
                               self.input_image.shape))

self.fixed_dim = int(fixed_dim)
<SYSTEM_TASK:> Constructs the carpet from the input image. <END_TASK> <USER_TASK:> Description: def _make_carpet(self, rescale_data): """ Constructs the carpet from the input image. Optional rescaling of the data. """
self.carpet = self._unroll_array(self.input_image, self.fixed_dim) if rescale_data: self.carpet = row_wise_rescale(self.carpet)
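The unroll step can be pictured with plain NumPy (a sketch; the actual _unroll_array helper is not shown here and may differ):

import numpy as np

img = np.random.rand(4, 5, 6, 100)        # small 4D image, fixed (time) dim last
carpet = img.reshape(-1, img.shape[-1])   # rows = voxels, columns = time points
print(carpet.shape)                       # (120, 100)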
<SYSTEM_TASK:> Displays the carpet in the given axis. <END_TASK> <USER_TASK:> Description: def show(self, clustered=False, ax_carpet=None, label_x_axis='time point', label_y_axis='voxels/ROI'): """ Displays the carpet in the given axis. Parameters ---------- clustered : bool, optional Flag to indicate whether to show the clustered/reduced carpet or the original. You must run .cluster_rows_in_roi() before trying to show clustered carpet. ax_carpet : Axis, optional handle to a valid matplotlib Axis label_x_axis : str String label for the x-axis of the carpet label_y_axis : str String label for the y-axis of the carpet Returns ------- ax_carpet : Axis handle to axis where carpet is shown """
if clustered is True and self._carpet_clustered is False: print('You must run .cluster_rows_in_roi() ' 'before being able to show clustered carpet!') return if ax_carpet is None: self.ax_carpet = plt.gca() else: if not isinstance(ax_carpet, Axes): raise ValueError('Input must be a valid matplotlib Axis!') self.ax_carpet = ax_carpet plt.sca(self.ax_carpet) self.fig = plt.gcf() # vmin/vmax are controlled, because we rescale all to [0, 1] self.imshow_params_carpet = dict(interpolation='none', cmap='gray', aspect='auto', origin='lower', zorder=1) # should we control vmin=0.0, vmax=1.0 ?? if not clustered: self.carpet_handle = self.ax_carpet.imshow(self.carpet, **self.imshow_params_carpet) else: self.carpet_handle = self.ax_carpet.imshow(self.clustered_carpet, **self.imshow_params_carpet) # TODO decorating axes with labels self.ax_carpet.set(xlabel=label_x_axis, ylabel=label_y_axis, frame_on=False) self.ax_carpet.set_ylim(auto=True) return self.ax_carpet
<SYSTEM_TASK:> Saves the current figure with carpet visualization to disk. <END_TASK> <USER_TASK:> Description: def save(self, output_path=None, title=None): """Saves the current figure with carpet visualization to disk. Parameters ---------- output_path : str Path to where the figure needs to be saved to. title : str text to overlay and annotate the visualization (done via plt.suptitle()) """
try:
    save_figure(self.fig, output_path=output_path, annot=title)
except Exception:
    print('Unable to save the figure to disk! \nException: ')
    traceback.print_exc()
<SYSTEM_TASK:> Clusters the data within all the ROIs specified in a mask. <END_TASK> <USER_TASK:> Description: def cluster_rows_in_roi(self, roi_mask=None, num_clusters_per_roi=5, metric='minkowski'): """Clusters the data within all the ROIs specified in a mask.

Parameters
----------
roi_mask : ndarray or None
    volumetric mask defining the list of ROIs, with a label for each voxel.
    This must be the same size in all dimensions except the fixed_dim,
    i.e. if you were making a Carpet from an fMRI image of size
    125x125x90x400, fixing the 4th dimension (of size 400), then the
    roi_mask must be of size 125x125x90.

num_clusters_per_roi : int
    number of clusters (n) to form within each ROI specified in the roi_mask.
    If n (say 20) is less than the number of voxels in a given ROI (say 2000),
    then data from approx. 2000/20=100 voxels would be summarized (averaged
    by default) into a single cluster. So if the ROI mask had m ROIs (say 10),
    the final clustered carpet would have m*n rows (200), regardless of the
    number of voxels in the 3D image.

metric : str
    distance metric for the hierarchical clustering algorithm;
    default : 'minkowski'
    Options: anything accepted by `scipy.spatial.distance.pdist`, which can be:
    ‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘cityblock’, ‘correlation’,
    ‘cosine’, ‘dice’, ‘euclidean’, ‘hamming’, ‘jaccard’, ‘kulsinski’,
    ‘mahalanobis’, ‘matching’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’,
    ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’.

"""
self._set_roi_mask(roi_mask)

try:
    clusters = [self._summarize_in_roi(self.roi_mask == label,
                                       num_clusters_per_roi,
                                       metric=metric)
                for label in self.roi_list]
    self.clustered_carpet = np.vstack(clusters)
except Exception:
    print('unable to produce the clustered carpet - exception:')
    traceback.print_exc()
    self._carpet_clustered = False
else:
    self._carpet_clustered = True
<SYSTEM_TASK:> Clusters a given matrix into the specified number of clusters <END_TASK> <USER_TASK:> Description: def _make_clusters(self, matrix, num_clusters_per_roi, metric): """Clusters a given matrix into the specified number of clusters according to the given metric."""
from scipy.cluster.hierarchy import fclusterdata

# criterion='maxclust' ensures t is interpreted as the number of
# clusters in the hierarchical clustering
group_ids = fclusterdata(matrix, metric=metric, t=num_clusters_per_roi,
                         criterion='maxclust')
group_set = np.unique(group_ids)
clusters = [self._summary_func(matrix[group_ids == group, :],
                               axis=0, keepdims=True)
            for group in group_set]

return np.vstack(clusters).squeeze()
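A self-contained sketch of the same clustering step (seeded random data; a plain per-cluster mean stands in for the configurable summary function):

import numpy as np
from scipy.cluster.hierarchy import fclusterdata

rng = np.random.RandomState(0)
matrix = rng.rand(30, 10)                # 30 voxels x 10 time points
ids = fclusterdata(matrix, t=5, criterion='maxclust', metric='minkowski')
means = np.vstack([matrix[ids == g].mean(axis=0) for g in np.unique(ids)])
print(means.shape)                       # typically (5, 10): one row per cluster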
<SYSTEM_TASK:> Removes voxels outside the given mask or ROI set. <END_TASK> <USER_TASK:> Description: def _apply_mask(self, roi_mask): """Removes voxels outside the given mask or ROI set."""
# TODO ensure compatible with input image # - must have < N dim and same size in moving dims. rows_to_delete = list() # to allow for additional masks to be applied in the future if isinstance(roi_mask, np.ndarray): # not (roi_mask is None or roi_mask=='auto'): self._set_roi_mask(roi_mask) rows_roi = np.where(self.roi_mask.flatten() == cfg.background_value) # TODO below would cause differences in size/shape across mask and carpet! self.carpet = np.delete(self.carpet, rows_roi, axis=0) else: self.roi_mask = np.ones(self.carpet.shape)
<SYSTEM_TASK:> Checks mask shape against input image shape. <END_TASK> <USER_TASK:> Description: def _verify_shape_compatibility(self, img, img_type): """Checks mask shape against input image shape."""
if self.input_image.shape[:-1] != img.shape: raise ValueError('Shape of the {} ({}) is not compatible ' 'with input image shape: {} ' ''.format(img_type, img.shape, self.input_image.shape[:-1]))
<SYSTEM_TASK:> Verifies an account activation code a user received by e-mail. <END_TASK> <USER_TASK:> Description: def verify_email(request, code, redirect_to=None): """Verifies an account activation code a user received by e-mail. Requires Messages Django Contrib. :param Request request: :param str code: :param str redirect_to: :return: """
valid_code = EmailConfirmation.is_valid(code)

if valid_code:
    valid_code.activate()
    messages.success(request, SIGNUP_VERIFY_EMAIL_SUCCESS_TEXT, 'success')
else:
    messages.error(request, SIGNUP_VERIFY_EMAIL_ERROR_TEXT, 'danger error')

if redirect_to is None:
    redirect_to = '/'

return redirect(redirect_to)
<SYSTEM_TASK:> Read and unpack a 16b float. <END_TASK> <USER_TASK:> Description: def unpack_float16(src): """Read and unpack a 16b float. The structure is: - 1 bit for the sign - 5 bits for the exponent, with an exponent bias of 16 - 10 bits for the mantissa """
bc = BitConsumer(src)
sign = bc.u_get(1)
exponent = bc.u_get(5)
mantissa = bc.u_get(10)
exponent -= 16
mantissa /= 2 ** 10
# note: (-1) ** sign needs the parentheses (unary minus binds tighter
# than ** in Python), and a binary float scales by powers of 2
num = ((-1) ** sign) * mantissa * (2 ** exponent)
return num
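A hypothetical round-trip check for the corrected decoding above, with the bit pattern hand-built for the bias-16 layout: sign 0, exponent 17 and mantissa 512/1024 give 0.5 * 2**1 = 1.0.

import io

src = io.BytesIO(b'\x46\x00')   # bits: 0 10001 10 | 00000000
print(unpack_float16(src))      # 1.0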
<SYSTEM_TASK:> Return a number using the given quantity of unsigned bits. <END_TASK> <USER_TASK:> Description: def u_get(self, quant): """Return a number using the given quantity of unsigned bits."""
if not quant: return bits = [] while quant: if self._count == 0: byte = self.src.read(1) number = struct.unpack("<B", byte)[0] self._bits = bin(number)[2:].zfill(8) self._count = 8 if quant > self._count: self._count, quant, toget = 0, quant - self._count, self._count else: self._count, quant, toget = self._count - quant, 0, quant read, self._bits = self._bits[:toget], self._bits[toget:] bits.append(read) data = int("".join(bits), 2) return data
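A quick sketch of the bit consumption (assuming BitConsumer wraps a binary file-like object and starts with an empty bit buffer):

import io

bc = BitConsumer(io.BytesIO(b'\xa5'))   # 0b10100101
print(bc.u_get(3))   # first three bits, 0b101 -> 5
print(bc.u_get(5))   # remaining bits, 0b00101 -> 5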
<SYSTEM_TASK:> Return a number using the given quantity of signed bits. <END_TASK> <USER_TASK:> Description: def s_get(self, quant): """Return a number using the given quantity of signed bits."""
if quant < 2:
    # special case, just return that unsigned value
    # quant can also be 0
    return self.u_get(quant)

sign = self.u_get(1)
raw_number = self.u_get(quant - 1)
if sign == 0:
    # positive, simplest case
    number = raw_number
else:
    # negative, two's complement
    complement = 2 ** (quant - 1) - 1
    number = -1 * ((raw_number ^ complement) + 1)
return number
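Continuing the sketch: with four signed bits, the pattern 1101 decodes as -((0b101 ^ 0b111) + 1) = -3, matching two's complement.

bc = BitConsumer(io.BytesIO(b'\xd0'))   # 0b1101 0000
print(bc.s_get(4))   # -3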
<SYSTEM_TASK:> Return a fixed bit number <END_TASK> <USER_TASK:> Description: def fb_get(self, quant, fb=16): """Return a fixed bit number quant: number of bits to read fb: number of bits in the integer and decimal part of the output default is 16, resulting in a 16.16 fixed bit"""
raw_number = self.s_get(quant) if quant == 1: # special case, just return that unsigned value return raw_number return raw_number / (1 << fb)
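A 16.16 fixed-point sketch: the signed 32-bit pattern 0x00018000 (98304) divided by 1 << 16 yields 1.5.

bc = BitConsumer(io.BytesIO(b'\x00\x01\x80\x00'))
print(bc.fb_get(32))   # 98304 / 65536 = 1.5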
<SYSTEM_TASK:> Provides different colormaps for different visualization types. <END_TASK> <USER_TASK:> Description: def get_freesurfer_cmap(vis_type): """Provides different colormaps for different visualization types."""
if vis_type in ('cortical_volumetric', 'cortical_contour'): LUT = get_freesurfer_cortical_LUT() cmap = ListedColormap(LUT) elif vis_type in ('labels_volumetric', 'labels_contour'): black = np.array([0, 0, 0, 1]) cmap = plt.get_cmap('hsv') # TODO using more than 20 labels might be a problem? cmap = cmap(np.linspace(0, 1, 20)) # prepending black to paint background as black colors = np.vstack((black, cmap)) cmap = ListedColormap(colors, 'my_colormap') else: raise NotImplementedError('color map for the visualization type {} has not been implemented!'.format(vis_type)) return cmap
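Usage sketch (label_slice is an assumed 2D integer label map, not defined above):

import matplotlib.pyplot as plt

cmap = get_freesurfer_cmap('labels_volumetric')
plt.imshow(label_slice, cmap=cmap)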
<SYSTEM_TASK:> Generate a new RSA key with the specified key size. <END_TASK> <USER_TASK:> Description: def create_rsa_key(bits=2048, keyfile=None, format='PEM', passphrase=None): """ Generate a new RSA key with the specified key size. :param int bits: bit size of the key modulus :param str keyfile: file the key should be written to :param str format: format for the key file, either PEM or DER :param str passphrase: pass phrase for encrypting the key file. If pass phrase is a callable its return value will be used. :return: RSA private key instance """
if passphrase and format != 'PEM':
    raise Exception(
        "passphrase is only supported for PEM encoded private keys")

rsakey = RSA.generate(bits)

if passphrase and callable(passphrase):
    passphrase = passphrase()

output = rsakey.exportKey(format=format, passphrase=passphrase)

if keyfile:
    with open(keyfile, 'w') as outputfile:
        outputfile.write(output)

log.info("generated private key:\n\n%s", output)

return rsakey
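Usage sketch (the path and secret are illustrative; a callable defers the pass phrase until the key is exported):

key = create_rsa_key(bits=2048, keyfile='/tmp/test.key',
                     passphrase=lambda: 'secret')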
<SYSTEM_TASK:> Generates a Certificate Signing Request for a given key. <END_TASK> <USER_TASK:> Description: def create_csr(key, dn, csrfilename=None, attributes=None): """ Generates a Certificate Signing Request for a given key. :param Crypto.PublicKey.RSA._RSAobj key: a key :param dn: a distinguished name as dictionary or string with key=value pairs separated by slashes like ``/CN=test.example.org/C=DE/O=Test organisation/`` :param str csrfilename: name of a file to write the CSR to :param tuple attributes: a tuple describing attributes to be included in the CSR :return: a certificate signing request """
certreqInfo = rfc2314.CertificationRequestInfo() certreqInfo.setComponentByName('version', rfc2314.Version(0)) certreqInfo.setComponentByName('subject', _build_dn(dn)) certreqInfo.setComponentByName('subjectPublicKeyInfo', _build_subject_publickey_info(key)) attrpos = certreqInfo.componentType.getPositionByName('attributes') attrtype = certreqInfo.componentType.getTypeByPosition(attrpos) certreqInfo.setComponentByName('attributes', _build_attributes( attributes, attrtype)) certreq = rfc2314.CertificationRequest() certreq.setComponentByName('certificationRequestInfo', certreqInfo) sigAlgIdentifier = rfc2314.SignatureAlgorithmIdentifier() sigAlgIdentifier.setComponentByName( 'algorithm', univ.ObjectIdentifier('1.2.840.113549.1.1.11')) certreq.setComponentByName( 'signatureAlgorithm', sigAlgIdentifier) certreq.setComponentByName( 'signature', _build_signature(key, certreqInfo)) output = _der_to_pem(encoder.encode(certreq), 'CERTIFICATE REQUEST') if csrfilename: with open(csrfilename, 'w') as csrfile: csrfile.write(output) log.info("generated certification request:\n\n%s", output) return output
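Continuing the sketch with the key from above; the DN is given as a dict, as the docstring allows:

csr_pem = create_csr(key, {'CN': 'test.example.org', 'C': 'DE'},
                     csrfilename='/tmp/test.csr')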
<SYSTEM_TASK:> Returns a response for the given view & args. <END_TASK> <USER_TASK:> Description: def respond_for(self, view_function, args, kwargs): """Returns a response for the given view & args."""
request = args[0] form = self.get_requested_form(request) if form.is_valid(): result = self.handle_form_valid(request, form) if result: return result self.update_request(request, form) return view_function(*args, **kwargs)
<SYSTEM_TASK:> Updates Request object with flows forms. <END_TASK> <USER_TASK:> Description: def update_request(self, request, form): """Updates Request object with flows forms."""
forms_key = '%s_forms' % self.flow_type # Use ordered forms dict in case _formNode wants to fetch the first defined. flow_dict = OrderedDict() try: flow_dict = request.sitegate[forms_key] except AttributeError: request.sitegate = {} except KeyError: pass flow_dict[self.get_flow_name()] = form request.sitegate[forms_key] = flow_dict
<SYSTEM_TASK:> Helper method. Generic login with username and password. <END_TASK> <USER_TASK:> Description: def login_generic(request, username, password): """Helper method. Generic login with username and password."""
user = authenticate(username=username, password=password) if user is not None and user.is_active: login(request, user) return True return False
<SYSTEM_TASK:> Returns flow argument, as provided with sitegate decorators <END_TASK> <USER_TASK:> Description: def get_arg_or_attr(self, name, default=None): """Returns a flow argument, as provided with sitegate decorators, or an attribute set on the flow class, or the given default."""
if name in self.flow_args: return self.flow_args[name] try: return getattr(self, name) except AttributeError: return default
<SYSTEM_TASK:> Returns an instance of a form requested. <END_TASK> <USER_TASK:> Description: def get_requested_form(self, request): """Returns an instance of a form requested."""
flow_name = self.get_flow_name() flow_key = '%s_flow' % self.flow_type flow_enabled = self.enabled form_data = None if (flow_enabled and request.method == 'POST' and request.POST.get(flow_key, False) and request.POST[flow_key] == flow_name): form_data = request.POST form = self.init_form( form_data, widget_attrs=self.flow_args.get('widget_attrs', None), template=self.get_template_name(self.flow_args.get('template', None)) ) # Attach flow identifying field to differentiate among several possible forms. form.fields[flow_key] = forms.CharField(required=True, initial=flow_name, widget=forms.HiddenInput) form.flow_enabled = flow_enabled form.flow_disabled_text = self.disabled_text return form
<SYSTEM_TASK:> Constructs, populates and returns a form. <END_TASK> <USER_TASK:> Description: def init_form(self, form_data, widget_attrs=None, template=None): """Constructs, populates and returns a form."""
form = self.form(data=form_data) form.template = template # Attach flow attribute to have access from flow forms (usually to call get_arg_or_attr()) form.flow = self if widget_attrs is not None: set_form_widgets_attrs(form, widget_attrs) return form
<SYSTEM_TASK:> Determine the file name for the JSON log. <END_TASK> <USER_TASK:> Description: def log_path(scraper): """ Determine the file name for the JSON log. """
return os.path.join(scraper.config.data_path, '%s.jsonlog' % scraper.name)
<SYSTEM_TASK:> Create two log handlers, one for console output and one for a JSON log <END_TASK> <USER_TASK:> Description: def make_logger(scraper): """ Create two log handlers: one to print info-level output to the console, the other to store all logging in a JSON file which will later be used to generate reports. """
logger = logging.getLogger('') logger.setLevel(logging.DEBUG) requests_log = logging.getLogger("requests") requests_log.setLevel(logging.WARNING) json_handler = logging.FileHandler(log_path(scraper)) json_handler.setLevel(logging.DEBUG) json_formatter = jsonlogger.JsonFormatter(make_json_format()) json_handler.setFormatter(json_formatter) logger.addHandler(json_handler) console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) fmt = '%(name)s [%(levelname)-8s]: %(message)s' formatter = logging.Formatter(fmt) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger = logging.getLogger(scraper.name) logger = TaskAdapter(logger, scraper) return logger
<SYSTEM_TASK:> Initiate an SA. <END_TASK> <USER_TASK:> Description: def initiate(self, sa): """Initiate an SA. :param sa: the SA to initiate :type sa: dict :return: logs emitted by command, with `errmsg` given on failure :rtype: :py:class:`vici.session.CommandResult` """
response = self.handler.streamed_request("initiate", "control-log", sa) return self._result(*response)
<SYSTEM_TASK:> Terminate an SA. <END_TASK> <USER_TASK:> Description: def terminate(self, sa): """Terminate an SA. :param sa: the SA to terminate :type sa: dict :return: logs emitted by command, with `errmsg` given on failure :rtype: :py:class:`vici.session.CommandResult` """
response = self.handler.streamed_request("terminate", "control-log", sa) return self._result(*response)
<SYSTEM_TASK:> Retrieve installed trap, drop and bypass policies. <END_TASK> <USER_TASK:> Description: def list_policies(self, filters=None): """Retrieve installed trap, drop and bypass policies. :param filters: retrieve only matching policies (optional) :type filters: dict :return: list of installed trap, drop and bypass policies :rtype: list """
_, policy_list = self.handler.streamed_request("list-policies", "list-policy", filters) return policy_list
<SYSTEM_TASK:> Retrieve loaded connections. <END_TASK> <USER_TASK:> Description: def list_conns(self, filters=None): """Retrieve loaded connections. :param filters: retrieve only matching configuration names (optional) :type filters: dict :return: list of connections :rtype: list """
_, connection_list = self.handler.streamed_request("list-conns", "list-conn", filters) return connection_list
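Usage sketch for the vici wrappers above (assumes a reachable strongSwan charon daemon with the vici plugin; the connection and child names are illustrative):

import vici

s = vici.Session()
for conn in s.list_conns({'ike': 'my-conn'}):
    print(conn)
logs = s.initiate({'child': 'my-child'})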