Columns: text_prompt (string, lengths 157 to 13.1k), code_prompt (string, lengths 7 to 19.8k)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_wavedrom(self, node, outpath, bname, format): """ Render a wavedrom image """
# Try to convert node, raise error with code on failure try: svgout = WaveDrom().renderWaveForm(0, json.loads(node['code'])) except JSONDecodeError as e: raise SphinxError("Cannot render the following json code: \n{} \n\nError: {}".format(node['code'], e)) if not os.path.exists(outpath): os.makedirs(outpath) # SVG can be directly written and is supported on all versions if format == 'image/svg+xml': fname = "{}.{}".format(bname, "svg") fpath = os.path.join(outpath, fname) svgout.saveas(fpath) return fname # It gets a bit ugly if the output does not support svg. We use cairosvg, because it is the easiest # to use (no dependency on installed programs). But it only works for Python 3. try: import cairosvg except ImportError: raise SphinxError(__("Cannot import 'cairosvg'. In Python 2 wavedrom figures other than svg are " "not supported, in Python 3 ensure 'cairosvg' is installed.")) if format == 'application/pdf': fname = "{}.{}".format(bname, "pdf") fpath = os.path.join(outpath, fname) cairosvg.svg2pdf(svgout.tostring(), write_to=fpath) return fname if format == 'image/png': fname = "{}.{}".format(bname, "png") fpath = os.path.join(outpath, fname) cairosvg.svg2png(svgout.tostring(), write_to=fpath) return fname raise SphinxError("No valid wavedrom conversion supplied")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_wavedrom(self, node): """ Visit the wavedrom node """
format = determine_format(self.builder.supported_image_types) if format is None: raise SphinxError(__("Cannot determine a suitable output format")) # Create random filename bname = "wavedrom-{}".format(uuid4()) outpath = path.join(self.builder.outdir, self.builder.imagedir) # Render the wavedrom image imgname = render_wavedrom(self, node, outpath, bname, format) # Now we unpack the image node again. The file was created at the build destination, # and we can now use the standard visitor for the image node. We add the image node # as a child and then raise a SkipDeparture, which will trigger the builder to visit # children. image_node = node['image_node'] image_node['uri'] = os.path.join(self.builder.imgpath, imgname) node.append(image_node) raise nodes.SkipDeparture
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def doctree_resolved(app, doctree, fromdocname): """ When the document and all the links are fully resolved, we inject one raw HTML element that runs the WaveDrom processing command on the page's onload event. """
# Skip for non-html or if javascript is not inlined if not app.env.config.wavedrom_html_jsinline: return text = """ <script type="text/javascript"> function init() { WaveDrom.ProcessAll(); } window.onload = init; </script>""" doctree.append(nodes.raw(text=text, format='html'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup(app): """ Setup the extension """
app.add_config_value('offline_skin_js_path', None, 'html') app.add_config_value('offline_wavedrom_js_path', None, 'html') app.add_config_value('wavedrom_html_jsinline', True, 'html') app.add_directive('wavedrom', WavedromDirective) app.connect('build-finished', build_finished) app.connect('builder-inited', builder_inited) app.connect('doctree-resolved', doctree_resolved) app.add_node(wavedromnode, html = (visit_wavedrom, None), latex = (visit_wavedrom, None), )
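Since setup() above only registers the directive, node and configuration values, a minimal sketch of how a project might enable the extension is shown below. The extension module name is an assumption; only the configuration keys are taken from setup().

# conf.py -- hypothetical Sphinx project configuration (module name assumed)
extensions = ['sphinxcontrib.wavedrom']

# Config values registered by setup() above
wavedrom_html_jsinline = True        # inline the onload processing script for HTML output
offline_skin_js_path = None          # optional path to a local WaveDrom skin script
offline_wavedrom_js_path = None      # optional path to a local WaveDrom script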
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_supercell(system, matrix, supercell=[1, 1, 1]): """ Return a supercell. This function takes the input unit cell and creates a supercell of it that is returned as a new :class:`pywindow.molecular.MolecularSystem`. Parameters system : :attr:`pywindow.molecular.MolecularSystem.system` The unit cell for creation of the supercell matrix : :class:`numpy.array` The unit cell parameters in the form of a lattice. supercell : :class:`list`, optional A list that specifies the size of the supercell in the a, b and c directions. (default=[1, 1, 1]) Returns ------- :class:`pywindow.molecular.MolecularSystem` Returns the created supercell as a new :class:`MolecularSystem`. """
user_supercell = [[1, supercell[0]], [1, supercell[1]], [1, supercell[2]]] system = create_supercell(system, matrix, supercell=user_supercell) return MolecularSystem.load_system(system)
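A hedged usage sketch follows; the loader call, the import name and the shape of the lattice matrix are assumptions, only make_supercell's own signature comes from the description above.

import numpy as np
import pywindow as pw   # assumed import name

# Load a periodic system and build a 2x2x2 supercell (assumed workflow).
molsys = pw.MolecularSystem.load_file("structure.pdb")          # assumed loader
lattice = np.array([[25.0, 0.0, 0.0],
                    [0.0, 25.0, 0.0],
                    [0.0, 0.0, 25.0]])                          # unit cell lattice (3x3)
supercell = make_supercell(molsys.system, lattice, supercell=[2, 2, 2])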
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_frames(self, frames='all', override=False, **kwargs): """ Extract frames from the trajectory file. Depending on the passed parameters a frame, a list of particular frames, a range of frames (from, to), or all frames can be extracted with this function. Parameters frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str` Specified frame (:class:`int`), or frames (:class:`list`), or range (:class:`tuple`), or `all`/`everything` (:class:`str`). (default=`all`) override : :class:`bool` If True, a frame already stored in :attr:`frames` can be overridden. (default=False) extract_data : :class:`bool`, optional If False, a frame is returned as a :class:`str` block as in the trajectory file. Otherwise, it is extracted and returned as :class:`pywindow.molecular.MolecularSystem`. (default=True) swap_atoms : :class:`dict`, optional If this kwarg is passed with an appropriate dictionary a :func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will be applied to the extracted frame. forcefield : :class:`str`, optional If this kwarg is passed with an appropriate forcefield keyword a :func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()` will be applied to the extracted frame. Returns ------- :class:`pywindow.molecular.MolecularSystem` If a single frame is extracted. None : :class:`NoneType` If more than one frame is extracted, the frames are stored in :attr:`frames` """
if override is True: self.frames = {} if isinstance(frames, int): frame = self._get_frame( self.trajectory_map[frames], frames, **kwargs) if frames not in self.frames.keys(): self.frames[frames] = frame return frame if isinstance(frames, list): for frame in frames: if frame not in self.frames.keys(): self.frames[frame] = self._get_frame( self.trajectory_map[frame], frame, **kwargs) if isinstance(frames, tuple): for frame in range(frames[0], frames[1]): if frame not in self.frames.keys(): self.frames[frame] = self._get_frame( self.trajectory_map[frame], frame, **kwargs) if isinstance(frames, str): if frames in ['all', 'everything']: for frame in range(0, self.no_of_frames): if frame not in self.frames.keys(): self.frames[frame] = self._get_frame( self.trajectory_map[frame], frame, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _map_trajectory(self): """ Map the trajectory file, storing the byte range of each frame in the :attr:`trajectory_map` class attribute """
self.trajectory_map = {} with open(self.filepath, 'r') as trajectory_file: with closing( mmap( trajectory_file.fileno(), 0, access=ACCESS_READ)) as mapped_file: progress = 0 line = 0 frame = -1 frame_start = 0 while progress <= len(mapped_file): line = line + 1 # We read a binary data from a mapped file. bline = mapped_file.readline() # If the bline length equals zero we terminate. # We reached end of the file but still add the last frame! if len(bline) == 0: frame = frame + 1 if progress - frame_start > 10: self.trajectory_map[frame] = [ frame_start, progress ] break # We need to decode byte line into an utf-8 string. sline = bline.decode("utf-8").strip('\n').split() # We extract map's byte coordinates for each frame if len(sline) == 1 and sline[0] == 'END': frame = frame + 1 self.trajectory_map[frame] = [frame_start, progress] frame_start = progress # Here we extract the map's byte coordinates for the header # And also the periodic system type needed for later. progress = progress + len(bline) self.no_of_frames = frame
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def full_analysis(self, ncpus=1, **kwargs): """ Perform a full structural analysis of a molecule. This invokes other methods: 1. :attr:`molecular_weight()` 2. :attr:`calculate_centre_of_mass()` 3. :attr:`calculate_maximum_diameter()` 4. :attr:`calculate_average_diameter()` 5. :attr:`calculate_pore_diameter()` 6. :attr:`calculate_pore_volume()` 7. :attr:`calculate_pore_diameter_opt()` 8. :attr:`calculate_pore_volume_opt()` 9. :attr:`calculate_windows()` Parameters ncpus : :class:`int` Number of CPUs used for the parallelised parts of :func:`pywindow.utilities.find_windows()`. (default=1=serial) Returns ------- :attr:`Molecule.properties` The updated :attr:`Molecule.properties` with the returns of all invoked methods. """
self.molecular_weight() self.calculate_centre_of_mass() self.calculate_maximum_diameter() self.calculate_average_diameter() self.calculate_pore_diameter() self.calculate_pore_volume() self.calculate_pore_diameter_opt(**kwargs) self.calculate_pore_volume_opt(**kwargs) self.calculate_windows(ncpus=ncpus, **kwargs) # self._circumcircle(**kwargs) return self.properties
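A possible driver for this analysis is sketched below, with hedges: the loader and the molecule-extraction call are assumptions, only full_analysis() itself is taken from the description above.

import pywindow as pw   # assumed import name

molsys = pw.MolecularSystem.load_file("cage.pdb")    # assumed loader
mol = molsys.system_to_molecule()                    # assumed conversion to a single Molecule
results = mol.full_analysis(ncpus=2)
print(results.get("pore_diameter"), results.get("windows"))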
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_centre_of_mass(self): """ Return the xyz coordinates of the centre of mass of a molecule. Returns ------- :class:`numpy.array` The centre of mass of the molecule. """
self.centre_of_mass = center_of_mass(self.elements, self.coordinates) self.properties['centre_of_mass'] = self.centre_of_mass return self.centre_of_mass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_maximum_diameter(self): """ Return the maximum dimension of a molecule. Returns ------- :class:`float` The maximum dimension of the molecule. """
self.maxd_atom_1, self.maxd_atom_2, self.maximum_diameter = max_dim( self.elements, self.coordinates) self.properties['maximum_diameter'] = { 'diameter': self.maximum_diameter, 'atom_1': int(self.maxd_atom_1), 'atom_2': int(self.maxd_atom_2), } return self.maximum_diameter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_average_diameter(self, **kwargs): """ Return the average dimension of a molecule. Returns ------- :class:`float` The average dimension of the molecule. """
self.average_diameter = find_average_diameter( self.elements, self.coordinates, **kwargs) return self.average_diameter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_pore_diameter(self): """ Return the intrinsic pore diameter. Returns ------- :class:`float` The intrinsic pore diameter. """
self.pore_diameter, self.pore_closest_atom = pore_diameter( self.elements, self.coordinates) self.properties['pore_diameter'] = { 'diameter': self.pore_diameter, 'atom': int(self.pore_closest_atom), } return self.pore_diameter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_pore_volume(self): """ Return the intrinsic pore volume. Returns ------- :class:`float` The intrinsic pore volume. """
self.pore_volume = sphere_volume(self.calculate_pore_diameter() / 2) self.properties['pore_volume'] = self.pore_volume return self.pore_volume
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_windows(self, **kwargs): """ Return the diameters of all windows in a molecule. This function first finds and then measures the diameters of all the windows in the molecule. Returns ------- :class:`numpy.array` An array of windows' diameters. :class:`NoneType` If no windows were found. """
windows = find_windows(self.elements, self.coordinates, **kwargs) if windows: self.properties.update( { 'windows': { 'diameters': windows[0], 'centre_of_mass': windows[1], } } ) return windows[0] else: self.properties.update( {'windows': {'diameters': None, 'centre_of_mass': None, }} ) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shift_to_origin(self, **kwargs): """ Shift a molecule to the origin. This function takes the molecule's coordinates and adjusts them so that the centre of mass of the molecule coincides with the origin of the coordinate system. Returns ------- None : :class:`NoneType` """
self.coordinates = shift_com(self.elements, self.coordinates, **kwargs) self._update()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rebuild_system(self, override=False, **kwargs): """ Rebuild the molecules in a molecular system. Parameters override : :class:`bool`, optional (default=False) If False, the rebuilt molecular system is returned as a new :class:`MolecularSystem`; if True, the current :class:`MolecularSystem` is modified in place. """
# First we create a 3x3x3 supercell with the initial unit cell in the # centre and the 26 unit cell translations around to provide all the # atom positions necessary for the molecules passing through periodic # boundary reconstruction step. supercell_333 = create_supercell(self.system, **kwargs) # smolsys = self.load_system(supercell_333, self.system_id + '_311') # smolsys.dump_system(override=True) discrete = discrete_molecules(self.system, rebuild=supercell_333) # This function overrides the initial data for 'coordinates', # 'atom_ids', and 'elements' instances in the 'system' dictionary. coordinates = np.array([], dtype=np.float64).reshape(0, 3) atom_ids = np.array([]) elements = np.array([]) for i in discrete: coordinates = np.concatenate( [coordinates, i['coordinates']], axis=0 ) atom_ids = np.concatenate([atom_ids, i['atom_ids']], axis=0) elements = np.concatenate([elements, i['elements']], axis=0) rebuild_system = { 'coordinates': coordinates, 'atom_ids': atom_ids, 'elements': elements } if override is True: self.system.update(rebuild_system) return None else: return self.load_system(rebuild_system)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def swap_atom_keys(self, swap_dict, dict_key='atom_ids'): """ Swap a force field atom id for another user-defined value. This modifies all values in :attr:`MolecularSystem.system['atom_ids']` that match the criteria. This function can be used to decipher a whole forcefield if an appropriate dictionary is passed to the function. Example ------- In this example all atom ids 'he' will be exchanged to 'H'. .. code-block:: python pywindow.molecular.MolecularSystem.swap_atom_keys({'he': 'H'}) Parameters swap_dict: :class:`dict` A dictionary containing force field atom ids (keys) to be swapped for the corresponding values. dict_key: :class:`str` A key in the :attr:`MolecularSystem.system` dictionary to perform the atom key swapping operation on. (default='atom_ids') Returns ------- None : :class:`NoneType` """
# Similar situation to the one from decipher_atom_keys function. if 'atom_ids' not in self.system.keys(): dict_key = 'elements' for atom_key in range(len(self.system[dict_key])): for key in swap_dict.keys(): if self.system[dict_key][atom_key] == key: self.system[dict_key][atom_key] = swap_dict[key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decipher_atom_keys(self, forcefield='DLF', dict_key='atom_ids'): """ Decipher force field atom ids. This takes all values in :attr:`MolecularSystem.system['atom_ids']` that match force field type criteria and creates :attr:`MolecularSystem.system['elements']` with the corresponding periodic table of elements equivalents. If a forcefield is not supported by this method, the :func:`MolecularSystem.swap_atom_keys()` can be used instead. DLF stands for DL_F notation. See: C. W. Yong, Descriptions and Implementations of DL_F Notation: A Natural Chemical Expression System of Atom Types for Molecular Simulations, J. Chem. Inf. Model., 2016, 56, 1405–1409. Parameters forcefield : :class:`str` The forcefield used to decipher atom ids. Allowed (not case sensitive): 'OPLS', 'OPLS2005', 'OPLSAA', 'OPLS3', 'DLF', 'DL_F'. (default='DLF') dict_key : :class:`str` The :attr:`MolecularSystem.system` dictionary key to the array containing the force field atom ids. (default='atom_ids') Returns ------- None : :class:`NoneType` """
# In case there is no 'atom_ids' key we try 'elements'. This is for # XYZ and MOL files mostly. But, we keep the dict_key keyword for # someone who would want to decipher 'elements' even if 'atom_ids' key # is present in the system's dictionary. if 'atom_ids' not in self.system.keys(): dict_key = 'elements' # I do it on temporary object so that it only finishes when successful temp = deepcopy(self.system[dict_key]) for element in range(len(temp)): temp[element] = "{0}".format( decipher_atom_key( temp[element], forcefield=forcefield)) self.system['elements'] = temp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_pores(self, sampling_points): """ Under development."""
pores = [] for point in sampling_points: pores.append( Pore( self.system['elements'], self.system['coordinates'], com=point)) return pores
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_attrname_by_colname(instance, name): """ Get value from SQLAlchemy instance by column name :Parameters: - `instance`: SQLAlchemy model instance. - `name`: Column name :Examples: 'left' """
for attr, column in list(sqlalchemy.inspect(instance.__class__).c.items()): if column.name == name: return attr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_empty_instance(table): """ Return empty instance of model. """
instance_defaults_params = inspect.getfullargspec(table.__init__).args[1:] # list like ['name', 'group', 'visible'] to dict with empty # value as {'name': None, 'group': None, 'visible': None} init = dict( list(zip(instance_defaults_params, itertools.repeat(None))) ) return table(**init)
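inspect.getfullargspec works here, but a sketch using inspect.signature is shown below as an alternative that also covers keyword-only arguments; it is not part of the library above.

import inspect

def get_empty_instance_by_signature(table):
    # Build a {param: None} dict from the constructor signature, skipping
    # 'self' and any *args/**kwargs entries.
    params = inspect.signature(table.__init__).parameters
    init = {
        name: None
        for name, p in params.items()
        if name != 'self' and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
    }
    return table(**init)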
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pk(obj): """ Return the primary key columns of a model class or instance. :Parameters: - `obj`: SQLAlchemy model instance or class. :Examples: (Column('id', Integer(), table=<users>, primary_key=True, nullable=False),) """
if inspect.isclass(obj): pk_list = sqlalchemy.inspect(obj).primary_key else: pk_list = obj.__mapper__.primary_key return pk_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_rules_from_all_proxies(self): """ Clear fault injection rules from all known service proxies. """
self._queue = [] if self.debug: print('Clearing rules') for service in self.app.get_services(): for instance in self.app.get_service_instances(service): if self.debug: print('Clearing rules for %s - instance %s' % (service, instance)) resp = requests.delete("http://{}/gremlin/v1/rules".format(instance)) if resp.status_code != 200: print('Failed to clear rules for %s - instance %s' % (service, instance))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_failure(self, scenario=None, **args): """Add a given failure scenario @param scenario: string 'delayrequests' or 'crash' """
assert scenario is not None and scenario in self.functiondict self.functiondict[scenario](**args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_failures(self, gremlins): """Add gremlins to environment"""
assert isinstance(gremlins, dict) and 'gremlins' in gremlins for gremlin in gremlins['gremlins']: self.setup_failure(**gremlin) self.push_rules()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_vertex(self, x, y, z, name): """Add a vertex by coordinates and a unique name. x, y, z are the coordinates of the vertex; name is a unique name used to refer to the vertex. Returns the Vertex object which was added. """
self.vertices[name] = Vertex(x, y, z, name) return self.vertices[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reduce_vertex(self, name1, *names): """Merge the vertices given in names into the vertex named name1, so that they all refer to the same Vertex instance as name1 """
v = self.vertices[name1] for n in names: w = self.vertices[n] v.alias.update(w.alias) # replace mapping from n w by to v self.vertices[n] = v
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_vertices(self): """call reduce_vertex on all vertices with identical values."""
# groupby expects sorted data sorted_vertices = sorted(list(self.vertices.items()), key=lambda v: hash(v[1])) groups = [] for k, g in groupby(sorted_vertices, lambda v: hash(v[1])): groups.append(list(g)) for group in groups: if len(group) == 1: continue names = [v[0] for v in group] self.reduce_vertex(*names)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract(path, to_path='', ext='', **kwargs): """ Unpack the tar or zip file at the specified path to the directory specified by to_path. """
Archive(path, ext=ext).extract(to_path, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _archive_cls(file, ext=''): """ Return the proper Archive implementation class, based on the file type. """
cls = None filename = None if is_string(file): filename = file else: try: filename = file.name except AttributeError: raise UnrecognizedArchiveFormat( "File object not a recognized archive format.") lookup_filename = filename + ext base, tail_ext = os.path.splitext(lookup_filename.lower()) cls = extension_map.get(tail_ext) if not cls: base, ext = os.path.splitext(base) cls = extension_map.get(ext) if not cls: raise UnrecognizedArchiveFormat( "Path not a recognized archive format: %s" % filename) return cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_files(self, to_path=None): """ Check that all of the files contained in the archive are within the target directory. """
if to_path: target_path = os.path.normpath(os.path.realpath(to_path)) else: target_path = os.getcwd() for filename in self.filenames(): extract_path = os.path.join(target_path, filename) extract_path = os.path.normpath(os.path.realpath(extract_path)) if not extract_path.startswith(target_path): raise UnsafeArchive( "Archive member destination is outside the target" " directory. member: %s" % filename)
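The containment test above guards against archive members that climb out of the target directory ("zip slip"). A standalone illustration follows; the paths are hypothetical.

import os

target = os.path.normpath(os.path.realpath("/tmp/extract"))      # hypothetical target directory
for member in ("docs/readme.txt", "../../etc/passwd"):
    # Resolve where the member would land, then check it stays under the target.
    dest = os.path.normpath(os.path.realpath(os.path.join(target, member)))
    print(member, "stays inside target:", dest.startswith(target))

Note that a plain startswith() check also accepts sibling directories such as /tmp/extract-other; comparing against target plus os.sep is stricter.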
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def meanstack(infn: Path, Navg: int, ut1: Optional[datetime]=None, method: str='mean') -> Tuple[np.ndarray, Optional[datetime]]: infn = Path(infn).expanduser() # %% parse indices to load if isinstance(Navg, slice): key = Navg elif isinstance(Navg, int): key = slice(0, Navg) elif len(Navg) == 1: key = slice(0, Navg[0]) elif len(Navg) == 2: key = slice(Navg[0], Navg[1]) else: raise ValueError(f'not sure what you mean by Navg={Navg}') # %% load images """ some methods handled individually to improve efficiency with huge files """
if infn.suffix == '.h5': img, ut1 = _h5mean(infn, ut1, key, method) elif infn.suffix == '.fits': with fits.open(infn, mode='readonly', memmap=False) as f: # mmap doesn't work with BZERO/BSCALE/BLANK img = collapsestack(f[0].data, key, method) elif infn.suffix == '.mat': img = loadmat(infn) img = collapsestack(img['data'].T, key, method) # matlab is fortran order else: # .tif etc. img = imageio.imread(infn, as_gray=True) if img.ndim in (3, 4) and img.shape[-1] == 3: # assume RGB img = collapsestack(img, key, method) return img, ut1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_search_request( self, protocol_request, object_name, protocol_response_class): """ Runs the specified request at the specified object_name and instantiates an object of the specified class. We yield each object in the response's value list. If pages of results are present, repeat this process until the pageToken is null. """
not_done = True while not_done: response_object = self._run_search_page_request( protocol_request, object_name, protocol_response_class) value_list = getattr( response_object, protocol.getValueListName(protocol_response_class)) for extract in value_list: yield extract not_done = bool(response_object.next_page_token) protocol_request.page_token = response_object.next_page_token
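The same page-token loop can be sketched independently of the GA4GH protocol classes; fetch_page and the attribute names below are hypothetical stand-ins for _run_search_page_request and the response fields used above.

def iterate_pages(fetch_page, request):
    # Yield items page by page until the server returns an empty page token.
    while True:
        response = fetch_page(request)
        for item in response.results:
            yield item
        if not response.next_page_token:
            break
        request.page_token = response.next_page_token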
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_reference_bases(self, id_, start=0, end=None): """ Returns an iterator over the bases from the server in the form of consecutive strings. This command does not conform to the patterns of the other search and get requests, and is implemented differently. """
request = protocol.ListReferenceBasesRequest() request.start = pb.int(start) request.end = pb.int(end) request.reference_id = id_ not_done = True # TODO We should probably use a StringIO here to make string buffering # a bit more efficient. bases_list = [] while not_done: response = self._run_list_reference_bases_page_request(request) bases_list.append(response.sequence) not_done = bool(response.next_page_token) request.page_token = response.next_page_token return "".join(bases_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_variants( self, variant_set_id, start=None, end=None, reference_name=None, call_set_ids=None): """ Returns an iterator over the Variants fulfilling the specified conditions from the specified VariantSet. :param str variant_set_id: The ID of the :class:`ga4gh.protocol.VariantSet` of interest. :param int start: Required. The beginning of the window (0-based, inclusive) for which overlapping variants should be returned. Genomic positions are non-negative integers less than reference length. Requests spanning the join of circular genomes are represented as two requests one on each side of the join (position 0). :param int end: Required. The end of the window (0-based, exclusive) for which overlapping variants should be returned. :param str reference_name: The name of the :class:`ga4gh.protocol.Reference` we wish to return variants from. :param list call_set_ids: Only return variant calls which belong to call sets with these IDs. If an empty array, returns variants without any call objects. If null, returns all variant calls. :return: An iterator over the :class:`ga4gh.protocol.Variant` objects defined by the query parameters. :rtype: iter """
request = protocol.SearchVariantsRequest() request.reference_name = pb.string(reference_name) request.start = pb.int(start) request.end = pb.int(end) request.variant_set_id = variant_set_id request.call_set_ids.extend(pb.string(call_set_ids)) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "variants", protocol.SearchVariantsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_variant_annotations( self, variant_annotation_set_id, reference_name="", reference_id="", start=0, end=0, effects=[]): """ Returns an iterator over the Variant Annotations fulfilling the specified conditions from the specified VariantSet. :param str variant_annotation_set_id: The ID of the :class:`ga4gh.protocol.VariantAnnotationSet` of interest. :param int start: Required. The beginning of the window (0-based, inclusive) for which overlapping variants should be returned. Genomic positions are non-negative integers less than reference length. Requests spanning the join of circular genomes are represented as two requests one on each side of the join (position 0). :param int end: Required. The end of the window (0-based, exclusive) for which overlapping variants should be returned. :param str reference_name: The name of the :class:`ga4gh.protocol.Reference` we wish to return variants from. :return: An iterator over the :class:`ga4gh.protocol.VariantAnnotation` objects defined by the query parameters. :rtype: iter """
request = protocol.SearchVariantAnnotationsRequest() request.variant_annotation_set_id = variant_annotation_set_id request.reference_name = reference_name request.reference_id = reference_id request.start = start request.end = end for effect in effects: request.effects.add().CopyFrom(protocol.OntologyTerm(**effect)) for effect in request.effects: if not effect.term_id: raise exceptions.ErrantRequestException( "Each ontology term should have an id set") request.page_size = pb.int(self._page_size) return self._run_search_request( request, "variantannotations", protocol.SearchVariantAnnotationsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_features( self, feature_set_id=None, parent_id="", reference_name="", start=0, end=0, feature_types=[], name="", gene_symbol=""): """ Returns the result of running a search_features method on a request with the passed-in parameters. :param str feature_set_id: ID of the feature Set being searched :param str parent_id: ID (optional) of the parent feature :param str reference_name: name of the reference to search (ex: "chr1") :param int start: search start position on reference :param int end: end position on reference :param feature_types: array of terms to limit search by (ex: "gene") :param str name: only return features with this name :param str gene_symbol: only return features on this gene :return: an iterator over Features as returned in the SearchFeaturesResponse object. """
request = protocol.SearchFeaturesRequest() request.feature_set_id = feature_set_id request.parent_id = parent_id request.reference_name = reference_name request.name = name request.gene_symbol = gene_symbol request.start = start request.end = end request.feature_types.extend(feature_types) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "features", protocol.SearchFeaturesResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_continuous( self, continuous_set_id=None, reference_name="", start=0, end=0): """ Returns the result of running a search_continuous method on a request with the passed-in parameters. :param str continuous_set_id: ID of the ContinuousSet being searched :param str reference_name: name of the reference to search (ex: "chr1") :param int start: search start position on reference :param int end: end position on reference :return: an iterator over Continuous returned in the SearchContinuousResponse object. """
request = protocol.SearchContinuousRequest() request.continuous_set_id = continuous_set_id request.reference_name = reference_name request.start = start request.end = end request.page_size = pb.int(self._page_size) return self._run_search_request( request, "continuous", protocol.SearchContinuousResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_datasets(self): """ Returns an iterator over the Datasets on the server. :return: An iterator over the :class:`ga4gh.protocol.Dataset` objects on the server. """
request = protocol.SearchDatasetsRequest() request.page_size = pb.int(self._page_size) return self._run_search_request( request, "datasets", protocol.SearchDatasetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_variant_sets(self, dataset_id): """ Returns an iterator over the VariantSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.VariantSet` objects defined by the query parameters. """
request = protocol.SearchVariantSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "variantsets", protocol.SearchVariantSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_variant_annotation_sets(self, variant_set_id): """ Returns an iterator over the Annotation Sets fulfilling the specified conditions from the specified variant set. :param str variant_set_id: The ID of the :class:`ga4gh.protocol.VariantSet` of interest. :return: An iterator over the :class:`ga4gh.protocol.AnnotationSet` objects defined by the query parameters. """
request = protocol.SearchVariantAnnotationSetsRequest() request.variant_set_id = variant_set_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "variantannotationsets", protocol.SearchVariantAnnotationSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_feature_sets(self, dataset_id): """ Returns an iterator over the FeatureSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.FeatureSet` objects defined by the query parameters. """
request = protocol.SearchFeatureSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "featuresets", protocol.SearchFeatureSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_continuous_sets(self, dataset_id): """ Returns an iterator over the ContinuousSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.ContinuousSet` objects defined by the query parameters. """
request = protocol.SearchContinuousSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "continuoussets", protocol.SearchContinuousSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_reference_sets( self, accession=None, md5checksum=None, assembly_id=None): """ Returns an iterator over the ReferenceSets fulfilling the specified conditions. :param str accession: If not null, return the reference sets for which the `accession` matches this string (case-sensitive, exact match). :param str md5checksum: If not null, return the reference sets for which the `md5checksum` matches this string (case-sensitive, exact match). See :class:`ga4gh.protocol.ReferenceSet::md5checksum` for details. :param str assembly_id: If not null, return the reference sets for which the `assembly_id` matches this string (case-sensitive, exact match). :return: An iterator over the :class:`ga4gh.protocol.ReferenceSet` objects defined by the query parameters. """
request = protocol.SearchReferenceSetsRequest() request.accession = pb.string(accession) request.md5checksum = pb.string(md5checksum) request.assembly_id = pb.string(assembly_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "referencesets", protocol.SearchReferenceSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_references( self, reference_set_id, accession=None, md5checksum=None): """ Returns an iterator over the References fulfilling the specified conditions from the specified Dataset. :param str reference_set_id: The ReferenceSet to search. :param str accession: If not None, return the references for which the `accession` matches this string (case-sensitive, exact match). :param str md5checksum: If not None, return the references for which the `md5checksum` matches this string (case-sensitive, exact match). :return: An iterator over the :class:`ga4gh.protocol.Reference` objects defined by the query parameters. """
request = protocol.SearchReferencesRequest() request.reference_set_id = reference_set_id request.accession = pb.string(accession) request.md5checksum = pb.string(md5checksum) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "references", protocol.SearchReferencesResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_call_sets(self, variant_set_id, name=None, biosample_id=None): """ Returns an iterator over the CallSets fulfilling the specified conditions from the specified VariantSet. :param str variant_set_id: Find callsets belonging to the provided variant set. :param str name: Only CallSets matching the specified name will be returned. :param str biosample_id: Only CallSets matching this id will be returned. :return: An iterator over the :class:`ga4gh.protocol.CallSet` objects defined by the query parameters. """
request = protocol.SearchCallSetsRequest() request.variant_set_id = variant_set_id request.name = pb.string(name) request.biosample_id = pb.string(biosample_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "callsets", protocol.SearchCallSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_biosamples(self, dataset_id, name=None, individual_id=None): """ Returns an iterator over the Biosamples fulfilling the specified conditions. :param str dataset_id: The dataset to search within. :param str name: Only Biosamples matching the specified name will be returned. :param str individual_id: Only Biosamples matching matching this id will be returned. :return: An iterator over the :class:`ga4gh.protocol.Biosample` objects defined by the query parameters. """
request = protocol.SearchBiosamplesRequest() request.dataset_id = dataset_id request.name = pb.string(name) request.individual_id = pb.string(individual_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "biosamples", protocol.SearchBiosamplesResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_individuals(self, dataset_id, name=None): """ Returns an iterator over the Individuals fulfilling the specified conditions. :param str dataset_id: The dataset to search within. :param str name: Only Individuals matching the specified name will be returned. :return: An iterator over the :class:`ga4gh.protocol.Biosample` objects defined by the query parameters. """
request = protocol.SearchIndividualsRequest() request.dataset_id = dataset_id request.name = pb.string(name) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "individuals", protocol.SearchIndividualsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_read_group_sets( self, dataset_id, name=None, biosample_id=None): """ Returns an iterator over the ReadGroupSets fulfilling the specified conditions from the specified Dataset. :param str name: Only ReadGroupSets matching the specified name will be returned. :param str biosample_id: Only ReadGroups matching the specified biosample will be included in the response. :return: An iterator over the :class:`ga4gh.protocol.ReadGroupSet` objects defined by the query parameters. :rtype: iter """
request = protocol.SearchReadGroupSetsRequest() request.dataset_id = dataset_id request.name = pb.string(name) request.biosample_id = pb.string(biosample_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "readgroupsets", protocol.SearchReadGroupSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_reads( self, read_group_ids, reference_id=None, start=None, end=None): """ Returns an iterator over the Reads fulfilling the specified conditions from the specified read_group_ids. :param str read_group_ids: The IDs of the :class:`ga4gh.protocol.ReadGroup` of interest. :param str reference_id: The name of the :class:`ga4gh.protocol.Reference` we wish to return reads mapped to. :param int start: The start position (0-based) of this query. If a reference is specified, this defaults to 0. Genomic positions are non-negative integers less than reference length. Requests spanning the join of circular genomes are represented as two requests one on each side of the join (position 0). :param int end: The end position (0-based, exclusive) of this query. If a reference is specified, this defaults to the reference's length. :return: An iterator over the :class:`ga4gh.protocol.ReadAlignment` objects defined by the query parameters. :rtype: iter """
request = protocol.SearchReadsRequest() request.read_group_ids.extend(read_group_ids) request.reference_id = pb.string(reference_id) request.start = pb.int(start) request.end = pb.int(end) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "reads", protocol.SearchReadsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_phenotype_association_sets(self, dataset_id): """ Returns an iterator over the PhenotypeAssociationSets on the server. """
request = protocol.SearchPhenotypeAssociationSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "phenotypeassociationsets", protocol.SearchPhenotypeAssociationSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_genotype_phenotype( self, phenotype_association_set_id=None, feature_ids=None, phenotype_ids=None, evidence=None): """ Returns an iterator over the GeneotypePhenotype associations from the server """
request = protocol.SearchGenotypePhenotypeRequest() request.phenotype_association_set_id = phenotype_association_set_id if feature_ids: request.feature_ids.extend(feature_ids) if phenotype_ids: request.phenotype_ids.extend(phenotype_ids) if evidence: request.evidence.extend(evidence) request.page_size = pb.int(self._page_size) self._logger.debug("search_genotype_phenotype {}".format(request)) return self._run_search_request( request, "featurephenotypeassociations", protocol.SearchGenotypePhenotypeResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_genotype_phenotype( self, phenotype_association_set_id=None, feature_ids=None, phenotype_ids=None, evidence=None): """ Returns an iterator over the GenotypePhenotype associations from the server """
request = protocol.SearchPhenotypesRequest() request.phenotype_association_set_id = phenotype_association_set_id if phenotype_id: request.id = phenotype_id if description: request.description = description if type_: request.type.mergeFrom(type_) if age_of_onset: request.age_of_onset = age_of_onset request.page_size = pb.int(self._page_size) return self._run_search_request( request, "phenotypes", protocol.SearchPhenotypesResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_rna_quantification_sets(self, dataset_id): """ Returns an iterator over the RnaQuantificationSet objects from the server """
request = protocol.SearchRnaQuantificationSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "rnaquantificationsets", protocol.SearchRnaQuantificationSetsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_rna_quantifications(self, rna_quantification_set_id=""): """ Returns an iterator over the RnaQuantification objects from the server :param str rna_quantification_set_id: The ID of the :class:`ga4gh.protocol.RnaQuantificationSet` of interest. """
request = protocol.SearchRnaQuantificationsRequest() request.rna_quantification_set_id = rna_quantification_set_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "rnaquantifications", protocol.SearchRnaQuantificationsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_expression_levels( self, rna_quantification_id="", names=[], threshold=0.0): """ Returns an iterator over the ExpressionLevel objects from the server :param list names: Only ExpressionLevels matching the specified names will be returned. :param str rna_quantification_id: The ID of the :class:`ga4gh.protocol.RnaQuantification` of interest. :param float threshold: Minimum expression of responses to return. """
request = protocol.SearchExpressionLevelsRequest() request.rna_quantification_id = rna_quantification_id request.names.extend(names) request.threshold = threshold request.page_size = pb.int(self._page_size) return self._run_search_request( request, "expressionlevels", protocol.SearchExpressionLevelsResponse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _setup_http_session(self): """ Sets up the common HTTP session parameters used by requests. """
headers = {"Content-type": "application/json"} if (self._id_token): headers.update({"authorization": "Bearer {}".format( self._id_token)}) self._session.headers.update(headers) # TODO is this unsafe???? self._session.verify = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_response_status(self, response): """ Checks the specified HTTP response from the requests package and raises an exception if a non-200 HTTP code was returned by the server. """
if response.status_code != requests.codes.ok: self._logger.error("%s %s", response.status_code, response.text) raise exceptions.RequestNonSuccessException( "Url {0} had status_code {1}".format( response.url, response.status_code))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_app(self, app): """ Initialize the captcha extension to the given app object. """
self.enabled = app.config.get("CAPTCHA_ENABLE", True) self.digits = app.config.get("CAPTCHA_LENGTH", 4) self.max = 10**self.digits self.image_generator = ImageCaptcha() self.rand = SystemRandom() def _generate(): if not self.enabled: return "" base64_captcha = self.generate() return Markup("<img src='{}'>".format("data:image/png;base64, {}".format(base64_captcha))) app.jinja_env.globals['captcha'] = _generate # Check for sessions that do not persist on the server. Issue a warning because they are most likely open to replay attacks. # This addon is built upon flask-session. session_type = app.config.get('SESSION_TYPE', None) if session_type is None or session_type == "null": raise RuntimeWarning("Flask-Sessionstore is not set to use a server persistent storage type. This likely means that captchas are vulnerable to replay attacks.") elif session_type == "sqlalchemy": # I have to do this as of version 0.3.1 of flask-session if using sqlalchemy as the session type in order to create the initial database. # Flask-sessionstore seems to have the same problem. app.session_interface.db.create_all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self): """ Update Monit daemon and services status. """
url = self.baseurl + '/_status?format=xml' response = self.s.get(url) response.raise_for_status() from xml.etree.ElementTree import XML root = XML(response.text) for serv_el in root.iter('service'): serv = Monit.Service(self, serv_el) self[serv.name] = serv # Pendingaction occurs when a service is stopping if self[serv.name].pendingaction: time.sleep(1) return Monit.update(self) # Monitor == 2 when service in startup if self[serv.name].monitorState == 2: time.sleep(1) return Monit.update(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_recv(self): """ Parses the IKE_INIT response packet received from Responder. Assigns the correct values of rSPI and Nr. Calculates the Diffie-Hellman exchange and assigns all keys to self. """
assert len(self.packets) == 2 packet = self.packets[-1] for p in packet.payloads: if p._type == payloads.Type.Nr: self.Nr = p._data logger.debug(u"Responder nonce {}".format(binascii.hexlify(self.Nr))) elif p._type == payloads.Type.KE: int_from_bytes = int.from_bytes(p.kex_data, 'big') self.diffie_hellman.derivate(int_from_bytes) else: logger.debug('Ignoring: {}'.format(p)) logger.debug('Nonce I: {}\nNonce R: {}'.format(binascii.hexlify(self.Ni), binascii.hexlify(self.Nr))) logger.debug('DH shared secret: {}'.format(binascii.hexlify(self.diffie_hellman.shared_secret))) SKEYSEED = prf(self.Ni + self.Nr, self.diffie_hellman.shared_secret) logger.debug(u"SKEYSEED is: {0!r:s}\n".format(binascii.hexlify(SKEYSEED))) keymat = prfplus(SKEYSEED, (self.Ni + self.Nr + to_bytes(self.iSPI) + to_bytes(self.rSPI)), 32 * 7) #3 * 32 + 2 * 32 + 2 * 32) logger.debug("Got %d bytes of key material" % len(keymat)) # get keys from material ( self.SK_d, self.SK_ai, self.SK_ar, self.SK_ei, self.SK_er, self.SK_pi, self.SK_pr ) = unpack("32s" * 7, keymat) # XXX: Should support other than 256-bit algorithms, really. logger.debug("SK_ai: {}".format(dump(self.SK_ai))) logger.debug("SK_ei: {}".format(dump(self.SK_ei)))
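For reference, prf+ as defined in RFC 7296 section 2.13 can be sketched as below, assuming the negotiated PRF is HMAC-SHA256; the library's own prf/prfplus helpers may differ in their details.

import hashlib
import hmac

def prf(key, data):
    # PRF instantiated as HMAC-SHA256 (an assumption about the negotiated algorithm).
    return hmac.new(key, data, hashlib.sha256).digest()

def prfplus(key, seed, length):
    # prf+(K, S) = T1 | T2 | ... where Tn = prf(K, Tn-1 | S | n), n a single octet.
    out, block, counter = b"", b"", 1
    while len(out) < length:
        block = prf(key, block + seed + bytes([counter]))
        out += block
        counter += 1
    return out[:length]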
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def authenticate_peer(self, auth_data, peer_id, message): """ Verifies the peers authentication. """
logger.debug('message: {}'.format(dump(message))) signed_octets = message + self.Ni + prf(self.SK_pr, peer_id._data) auth_type = const.AuthenticationType(struct.unpack(const.AUTH_HEADER, auth_data[:4])[0]) assert auth_type == const.AuthenticationType.RSA logger.debug(dump(auth_data)) try: return pubkey.verify(signed_octets, auth_data[4:], 'tests/peer.pem') except pubkey.VerifyError: raise IkeError("Remote peer authentication failed.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auth_recv(self): """ Handle peer's IKE_AUTH response. """
id_r = auth_data = None for p in self.packets[-1].payloads: if p._type == payloads.Type.IDr: id_r = p logger.debug('Got responder ID: {}'.format(dump(bytes(p)))) if p._type == payloads.Type.AUTH: auth_data = p._data if p._type == payloads.Type.SA: logger.debug('ESP_SPIin: {}'.format(p.spi)) self.esp_SPIin = p.spi for proposal in p.proposals: logger.debug("Proposal: {}".format(proposal.__dict__)) logger.debug(proposal.spi) if id_r is None or auth_data is None: raise IkeError('IDr missing from IKE_AUTH response') message2 = bytes(self.packets[1]) authenticated = self.authenticate_peer(auth_data, id_r, message2) assert authenticated keymat = prfplus(self.SK_d, self.Ni + self.Nr, 4 * 32) (self.esp_ei, self.esp_ai, self.esp_er, self.esp_ar, ) = unpack("32s" * 4, keymat) # TODO: Figure out the names for the params, they _ARE_ in correct places, just the names might mismatch. self.install_ipsec_sas()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_payload(self, payload): """ Adds a payload to packet, updating last payload's next_payload field """
if self.payloads: self.payloads[-1].next_payload = payload._type self.payloads.append(payload)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_caller_module(): """ Returns the name of the caller's module as a string. '__main__' """
stack = inspect.stack() assert len(stack) > 1 caller = stack[2][0] return caller.f_globals['__name__']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def uniform(self, key, min_value=0., max_value=1.): """Returns a random number between min_value and max_value"""
return min_value + self._random(key) * (max_value - min_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perlin(self, key, **kwargs): """Return Perlin noise seeded with the specified key. For parameters, check the PerlinNoise class."""
if hasattr(key, "encode"): key = key.encode('ascii') value = zlib.adler32(key, self.seed) return PerlinNoise(value, **kwargs)
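The key handling above hashes the key with adler32 against the instance seed, so the derived noise seed is deterministic per key; a small standalone illustration (base seed and keys are arbitrary):

import zlib

base_seed = 1234
for key in ("terrain", "terrain", "clouds"):
    # The same key and base seed always give the same derived value.
    print(key, zlib.adler32(key.encode("ascii"), base_seed))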
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_no_proxy_errors(self, **kwargs): """ Helper method to determine if the proxies logged any major errors related to the functioning of the proxy itself """
data = self._es.search(body={ "size": max_query_results, "query": { "filtered": { "query": { "match_all": {} }, "filter": { "term": { "level": "error" } } } } }) # if self.debug: # print(data) return GremlinTestResult(data["hits"]["total"] == 0, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_requests_with_errors(self): """ Helper method to determine if proxies logged any error related to the requests passing through"""
data = self._es.search(body={ "size": max_query_results, "query": { "filtered": { "query": { "match_all": {} }, "filter": { "exists": { "field": "errmsg" } } } } }) return GremlinTestResult(False, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_assertions(self, checklist, all=False): """Check a set of assertions @param all boolean if False, stop at first failure @return: list of GremlinTestResult, one per assertion checked (stops at the first failure unless all is True). """
assert isinstance(checklist, dict) and 'checks' in checklist
retval = None
retlist = []
for assertion in checklist['checks']:
    retval = self.check_assertion(**assertion)
    retlist.append(retval)
    if not retval.success and not all:
        print("Error message:", retval[3])
        return retlist
return retlist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(odir: Path, source_url: str, irng: Sequence[int]): """Download star index files. The default range was useful for my cameras. """
assert len(irng) == 2, 'specify start, stop indices' odir = Path(odir).expanduser() odir.mkdir(parents=True, exist_ok=True) ri = int(source_url.split('/')[-2][:2]) for i in range(*irng): fn = f'index-{ri:2d}{i:02d}.fits' url = f'{source_url}{fn}' ofn = odir / fn if ofn.is_file(): # no clobber print('skipping', ofn) continue print(f'{url} => {ofn}', end='\r') urlretrieve(url, ofn)
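A hedged usage sketch: the URL below is assumed for illustration and points at the astrometry.net 4100-series index directory; with it, `ri` becomes 41 and the call fetches index-4107.fits through index-4119.fits into the chosen directory, skipping files that already exist:

download('~/astrometry_indexes', 'http://data.astrometry.net/4100/', (7, 20))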
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exclude_match(exclude, link_value): """ Check excluded value against the link's current value """
if hasattr(exclude, "search") and exclude.search(link_value): return True if exclude == link_value: return True return False
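For example, the exclusion can be either a literal value or a compiled pattern (anything with a `search` method):

import re

exclude_match('mailto:[email protected]', 'mailto:[email protected]')    # True  (exact match)
exclude_match(re.compile(r'\.pdf$'), 'http://example.com/report.pdf')   # True  (regex match)
exclude_match('foo', 'bar')                                             # False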
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def seoify_hyperlink(hyperlink): """Modify a hyperlink to make it SEO-friendly by replacing hyphens with spaces and trimming multiple spaces. :param hyperlink: URL to attempt to grab SEO from """
last_slash = hyperlink.rfind('/') return re.sub(r' +|-', ' ', hyperlink[last_slash + 1:])
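For instance, only the final path segment is kept, hyphens become spaces, and runs of spaces collapse:

seoify_hyperlink('http://example.com/blog/buy-cheap-widgets')   # 'buy cheap widgets'
seoify_hyperlink('http://example.com/blog/how  to  seo')        # 'how to seo'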
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, limit=None, reverse=False, sort=None, exclude=None, duplicates=True, pretty=False, **filters): """ Using filters and sorts, this finds all hyperlinks on a web page :param limit: Crop results down to limit specified :param reverse: Reverse the list of links, useful before limiting :param sort: Key function used to sort the links :param exclude: Remove links from list :param duplicates: Determines if identical URLs should be displayed :param pretty: Quick and pretty formatting using pprint :param filters: Anchor attributes to search for """
if exclude is None: exclude = [] if 'href' not in filters: filters['href'] = True search = self._soup.findAll('a', **filters) if reverse: search.reverse() links = [] for anchor in search: build_link = anchor.attrs try: build_link[u'seo'] = seoify_hyperlink(anchor['href']) except KeyError: pass try: build_link[u'text'] = anchor.string or build_link['seo'] except KeyError: pass ignore_link = False for nixd in exclude: for key, value in six.iteritems(nixd): if key in build_link: if (isinstance(build_link[key], collections.Iterable) and not isinstance(build_link[key], six.string_types)): for item in build_link[key]: ignore_link = exclude_match(value, item) else: ignore_link = exclude_match(value, build_link[key]) if not duplicates: for link in links: if link['href'] == anchor['href']: ignore_link = True if not ignore_link: links.append(build_link) if limit is not None and len(links) == limit: break if sort is not None: links = sorted(links, key=sort, reverse=reverse) if pretty: pp = pprint.PrettyPrinter(indent=4) return pp.pprint(links) else: return links
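A hedged usage sketch, assuming this method lives on a scraper class that stores the parsed page as a BeautifulSoup object in `self._soup` (the `scraper` name below is an assumption):

import re

links = scraper.find(limit=5,
                     duplicates=False,
                     exclude=[{'href': re.compile(r'\.pdf$')}],
                     href=re.compile(r'^https://'))
for link in links:
    print(link['seo'], link['href'])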
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_plot(fn: Path, cm, ax, alpha=1): """Astrometry.net makes file ".new" with the image and the WCS SIP 2-D polynomial fit coefficients in the FITS header We use DECL as "x" and RA as "y". pcolormesh() is used as it handles arbitrary pixel shapes. Note that pcolormesh() cannot tolerate NaN in X or Y (NaN in C is OK). This is handled in https://github.com/scivision/pcolormesh_nan.py. """
with fits.open(fn, mode='readonly', memmap=False) as f: img = f[0].data yPix, xPix = f[0].shape[-2:] x, y = np.meshgrid(range(xPix), range(yPix)) # pixel indices to find RA/dec of xy = np.column_stack((x.ravel(order='C'), y.ravel(order='C'))) radec = wcs.WCS(f[0].header).all_pix2world(xy, 0) ra = radec[:, 0].reshape((yPix, xPix), order='C') dec = radec[:, 1].reshape((yPix, xPix), order='C') ax.set_title(fn.name) ax.pcolormesh(ra, dec, img, alpha=alpha, cmap=cm, norm=LogNorm()) ax.set_ylabel('Right Ascension [deg.]') ax.set_xlabel('Declination [deg.]')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dump(src): """ Returns data in hex format in groups of 4 octets delimited by spaces for debugging purposes. """
return b' '.join(binascii.hexlify(bytes(x)) for x in zip(src[::4], src[1::4], src[2::4], src[3::4]))
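For example, eight octets come out as two space-separated hex groups (trailing bytes that do not fill a complete 4-octet group are silently dropped by the zip):

dump(b'\x01\x02\x03\x04\x05\x06\x07\x08')   # b'01020304 05060708'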
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(): """ Display productpage with normal user and test user buttons"""
global productpage table = json2html.convert(json = json.dumps(productpage), table_attributes="class=\"table table-condensed table-bordered table-hover\"") return render_template('index.html', serviceTable=table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frange(start, end, step): """A range implementation which can handle floats"""
# Choose the step direction from the endpoints so descending ranges also yield values.
if start <= end:
    step = abs(step)
    while start < end:
        yield start
        start += step
else:
    step = -abs(step)
    while start > end:
        yield start
        start += step
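With the step direction corrected from the endpoints as above, both ascending and descending ranges behave as expected; the end value is excluded, like the built-in range:

list(frange(0.0, 1.0, 0.25))    # [0.0, 0.25, 0.5, 0.75]
list(frange(1.0, 0.0, 0.25))    # [1.0, 0.75, 0.5, 0.25]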
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gridrange(start, end, step): """Generate a grid of complex numbers"""
for x in frange(start.real, end.real, step.real): for y in frange(start.imag, end.imag, step.imag): yield x + y * 1j
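For example, the real and imaginary parts of the step control the grid spacing along each axis:

list(gridrange(0 + 0j, 1 + 1j, 0.5 + 0.5j))
# [0j, 0.5j, (0.5+0j), (0.5+0.5j)]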
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pad(data, blocksize=16): """ Pads data to blocksize according to RFC 4303. Pad length field is included in output. """
padlen = blocksize - len(data) % blocksize return bytes(data + bytearray(range(1, padlen)) + bytearray((padlen - 1,)))
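A worked example: three bytes padded to an 8-byte block gain the monotonically increasing pad bytes 01 02 03 04 followed by the pad-length octet 04; when the input already fills a whole block, a full extra block of padding is appended:

pad(b'abc', 8)              # b'abc\x01\x02\x03\x04\x04'  -> 8 bytes total
len(pad(b'12345678', 8))    # 16, a full block of padding is added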
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def doSolve(fitsfn: Path, args: str=None): """ Astrometry.net from at least version 0.67 is OK with Python 3. """
# binpath = Path(find_executable('solve-field')).parent opts = args.split(' ') if args else [] # %% build command cmd = ['solve-field', '--overwrite', str(fitsfn)] cmd += opts print('\n', ' '.join(cmd), '\n') # %% execute ret = subprocess.check_output(cmd, universal_newlines=True) # solve-field returns 0 even if it didn't solve! print(ret) if 'Did not solve' in ret: raise RuntimeError(f'could not solve {fitsfn}') print('\n\n *** done with astrometry.net ***\n ')
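A hedged invocation sketch; the file name is made up, and the extra arguments are standard solve-field scale hints passed straight through:

doSolve(Path('night1/frame_0001.fits'), '--scale-units degwidth --scale-low 10 --scale-high 20')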
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_rgb(hsv): """Converts a color from HSV to a hex RGB. HSV should be in range 0..1, though hue wraps around. Output is a hexadecimal color value as used by CSS, HTML and SVG"""
r, g, b = [int(min(255, max(0, component * 256))) for component in colorsys.hsv_to_rgb(*hsv)] return "%02x%02x%02x" % (r, g, b)
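For example, primary hues at full saturation and value map to the familiar hex strings:

to_rgb((0.0, 1.0, 1.0))    # 'ff0000' (red)
to_rgb((1/3, 1.0, 1.0))    # '00ff00' (green)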
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verbosityToLogLevel(verbosity): """ Returns the specified verbosity level interpreted as a logging level. """
ret = 0 if verbosity == 1: ret = logging.INFO elif verbosity >= 2: ret = logging.DEBUG return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addVariantSearchOptions(parser): """ Adds common options to a variant searches command line parser. """
addVariantSetIdArgument(parser) addReferenceNameArgument(parser) addCallSetIdsArgument(parser) addStartArgument(parser) addEndArgument(parser) addPageSizeArgument(parser)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addAnnotationsSearchOptions(parser): """ Adds common options to an annotation search command line parser. """
addAnnotationSetIdArgument(parser) addReferenceNameArgument(parser) addReferenceIdArgument(parser) addStartArgument(parser) addEndArgument(parser) addEffectsArgument(parser) addPageSizeArgument(parser)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addFeaturesSearchOptions(parser): """ Adds common options to a features search command line parser. """
addFeatureSetIdArgument(parser) addFeaturesReferenceNameArgument(parser) addStartArgument(parser) addEndArgument(parser) addParentFeatureIdArgument(parser) addFeatureTypesArgument(parser)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addContinuousSearchOptions(parser): """ Adds common options to a continuous search command line parser. """
addContinuousSetIdArgument(parser) addContinuousReferenceNameArgument(parser) addStartArgument(parser) addEndArgument(parser)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addGenotypePhenotypeSearchOptions(parser): """ Adds options to a g2p searches command line parser. """
parser.add_argument( "--phenotype_association_set_id", "-s", default=None, help="Only return associations from this phenotype_association_set.") parser.add_argument( "--feature_ids", "-f", default=None, help="Only return associations for these features.") parser.add_argument( "--phenotype_ids", "-p", default=None, help="Only return associations for these phenotypes.") parser.add_argument( "--evidence", "-E", default=None, help="Only return associations to this evidence.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addPhenotypeSearchOptions(parser): """ Adds options to a phenotype searches command line parser. """
parser.add_argument( "--phenotype_association_set_id", "-s", default=None, help="Only return phenotypes from this phenotype_association_set.") parser.add_argument( "--phenotype_id", "-p", default=None, help="Only return this phenotype.") parser.add_argument( "--description", "-d", default=None, help="Only return phenotypes matching this description.") parser.add_argument( "--age_of_onset", "-a", default=None, help="Only return phenotypes with this age_of_onset.") parser.add_argument( "--type", "-T", default=None, help="Only return phenotypes with this type.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _textOutput(self, gaObjects): """ Outputs a text summary of the specified protocol objects, one per line. """
for gaObject in gaObjects: if hasattr(gaObject, 'name'): print(gaObject.id, gaObject.name, sep="\t") else: print(gaObject.id, sep="\t")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllVariantSets(self): """ Returns all variant sets on the server. """
for dataset in self.getAllDatasets(): iterator = self._client.search_variant_sets( dataset_id=dataset.id) for variantSet in iterator: yield variantSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllFeatureSets(self): """ Returns all feature sets on the server. """
for dataset in self.getAllDatasets(): iterator = self._client.search_feature_sets( dataset_id=dataset.id) for featureSet in iterator: yield featureSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllContinuousSets(self): """ Returns all continuous sets on the server. """
for dataset in self.getAllDatasets(): iterator = self._client.search_continuous_sets( dataset_id=dataset.id) for continuousSet in iterator: yield continuousSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllReadGroupSets(self): """ Returns all readgroup sets on the server. """
for dataset in self.getAllDatasets(): iterator = self._client.search_read_group_sets( dataset_id=dataset.id) for readGroupSet in iterator: yield readGroupSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllReadGroups(self): """ Get all read groups in a read group set """
for dataset in self.getAllDatasets(): iterator = self._client.search_read_group_sets( dataset_id=dataset.id) for readGroupSet in iterator: readGroupSet = self._client.get_read_group_set( readGroupSet.id) for readGroup in readGroupSet.read_groups: yield readGroup.id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllAnnotationSets(self): """ Returns all variant annotation sets on the server. """
for variantSet in self.getAllVariantSets(): iterator = self._client.search_variant_annotation_sets( variant_set_id=variantSet.id) for variantAnnotationSet in iterator: yield variantAnnotationSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run(self, referenceGroupId, referenceId=None): """ automatically guess reference id if not passed """
# check if we can get reference id from rg if referenceId is None: referenceId = self._referenceId if referenceId is None: rg = self._client.get_read_group( read_group_id=referenceGroupId) iterator = self._client.search_references(rg.reference_set_id) for reference in iterator: self._run(referenceGroupId, reference.id) else: iterator = self._client.search_reads( read_group_ids=[referenceGroupId], reference_id=referenceId, start=self._start, end=self._end) self._output(iterator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Iterate passed read group ids, or go through all available read groups """
if not self._readGroupIds: for referenceGroupId in self.getAllReadGroups(): self._run(referenceGroupId) else: for referenceGroupId in self._readGroupIds: self._run(referenceGroupId)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_all_sections(prnt_sctns, child_sctns, style): """ Merge the doc-sections of the parent's and child's attribute into a single docstring. Parameters prnt_sctns: OrderedDict[str, Union[None,str]] child_sctns: OrderedDict[str, Union[None,str]] Returns ------- str Output docstring of the merged docstrings."""
doc = [] prnt_only_raises = prnt_sctns["Raises"] and not (prnt_sctns["Returns"] or prnt_sctns["Yields"]) if prnt_only_raises and (child_sctns["Returns"] or child_sctns["Yields"]): prnt_sctns["Raises"] = None for key in prnt_sctns: sect = merge_section(key, prnt_sctns[key], child_sctns[key], style) if sect is not None: doc.append(sect) return "\n\n".join(doc) if doc else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_numpy_napoleon_docs(prnt_doc=None, child_doc=None): """ Merge two numpy-style docstrings into a single docstring, according to napoleon docstring sections. Given the numpy-style docstrings from a parent and child's attributes, merge the docstring sections such that the child's section is used, wherever present, otherwise the parent's section is used. Any whitespace that can be uniformly removed from a docstring's second line and onwards is removed. Sections will be separated by a single blank line. Aliased docstring sections are normalized. E.g Args, Arguments -> Parameters Parameters prnt_doc: Optional[str] The docstring from the parent. child_doc: Optional[str] The docstring from the child. Returns ------- Union[str, None] The merged docstring. """
style = "numpy" return merge_all_sections(parse_napoleon_doc(prnt_doc, style), parse_napoleon_doc(child_doc, style), style)