<SYSTEM_TASK:> Removes parenthetical and dashed phrases <END_TASK> <USER_TASK:> Description: def without_extra_phrases(self): """Removes parenthetical and dashed phrases"""
    # the last parenthesis is optional, because sometimes they are truncated
    name = re.sub(r'\s*\([^)]*\)?\s*$', '', self.name)
    name = re.sub(r'(?i)\s* formerly.*$', '', name)
    name = re.sub(r'(?i)\s*and its affiliates$', '', name)
    name = re.sub(r'\bet al\b', '', name)

    # in some datasets, the name of an organization is followed by a hyphen and an abbreviated name, or a specific
    # department or geographic subdivision; we want to remove this extraneous stuff without breaking names like
    # Wal-Mart or Williams-Sonoma

    # if there's a hyphen at least four characters in, proceed
    if "-" in name:
        hyphen_parts = name.rsplit("-", 1)
        # if the part after the hyphen is shorter than the part before,
        # AND isn't either a number (often occurs in Union names) or a single letter (e.g., Tech-X),
        # AND the hyphen is preceded by either whitespace or at least four characters,
        # discard the hyphen and whatever follows
        if len(hyphen_parts[1]) < len(hyphen_parts[0]) and re.search(r'(\w{4,}|\s+)$', hyphen_parts[0]) and not re.match(r'^([a-zA-Z]|[0-9]+)$', hyphen_parts[1]):
            name = hyphen_parts[0].strip()

    return name
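A quick standalone sketch of the same regex pipeline; the surrounding name class is not shown in this row, so the helper below is hypothetical:

import re

def strip_extra_phrases(name):
    # trailing parenthetical (possibly truncated), then "formerly ..." and similar suffixes
    name = re.sub(r'\s*\([^)]*\)?\s*$', '', name)
    name = re.sub(r'(?i)\s*formerly.*$', '', name)
    name = re.sub(r'(?i)\s*and its affiliates$', '', name)
    name = re.sub(r'\bet al\b', '', name)
    return name

print(strip_extra_phrases("Acme Corp (formerly Road Runner Traps)"))  # -> 'Acme Corp'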
<SYSTEM_TASK:> The 'kernel' is an attempt to get at just the most pithy words in the name <END_TASK> <USER_TASK:> Description: def kernel(self): """ The 'kernel' is an attempt to get at just the most pithy words in the name """
    # list() around values() so the concatenation also works on Python 3 dict views
    stop_words = [y.lower() for y in list(self.abbreviations.values()) + self.filler_words]
    kernel = ' '.join([x for x in self.expand().split() if x.lower() not in stop_words])

    # this is a hack to get around the fact that this is the only two-word phrase we want to block
    # amongst our stop words. if we end up with more, we may need a better way to do this
    kernel = re.sub(r'\s*United States', '', kernel)

    return kernel
<SYSTEM_TASK:> This detects common family name prefixes and joins them to the last name, <END_TASK> <USER_TASK:> Description: def detect_and_fix_two_part_surname(self, args): """ This detects common family name prefixes and joins them to the last name, so names like "De Kuyper" don't end up with "De" as a middle name. """
    i = 0
    while i < len(args) - 1:
        if args[i].lower() in self.family_name_prefixes:
            args[i] = ' '.join(args[i:i+2])
            del(args[i+1])
            break
        else:
            i += 1
<SYSTEM_TASK:> Convert all the parts of the name to the proper case... carefully! <END_TASK> <USER_TASK:> Description: def case_name_parts(self): """ Convert all the parts of the name to the proper case... carefully! """
    if not self.is_mixed_case():
        self.honorific = self.honorific.title() if self.honorific else None
        self.nick = self.nick.title() if self.nick else None
        if self.first:
            self.first = self.first.title()
            self.first = self.capitalize_and_punctuate_initials(self.first)
        if self.last:
            self.last = self.last.title()
            self.last = self.uppercase_the_scots(self.last)
        self.middle = self.middle.title() if self.middle else None
        if self.suffix:
            # Title case Jr/Sr, but uppercase roman numerals
            if re.match(r'(?i).*[js]r', self.suffix):
                self.suffix = self.suffix.title()
            else:
                self.suffix = self.suffix.upper()
    return self
<SYSTEM_TASK:> Return Shapely Polygon from coordinates. <END_TASK> <USER_TASK:> Description: def _polygon_from_coords(coords, fix_geom=False, swap=True, dims=2): """ Return Shapely Polygon from coordinates. - coords: list of alternating latitude / longitude coordinates - fix_geom: automatically fix geometry """
    assert len(coords) % dims == 0
    number_of_points = len(coords) // dims  # integer division so reshape() receives an int on Python 3
    coords_as_array = np.array(coords)
    reshaped = coords_as_array.reshape(number_of_points, dims)
    points = [
        (float(i[1]), float(i[0])) if swap else (float(i[0]), float(i[1]))
        for i in reshaped.tolist()
    ]
    polygon = Polygon(points).buffer(0)
    try:
        assert polygon.is_valid
        return polygon
    except AssertionError:
        if fix_geom:
            return polygon.buffer(0)
        else:
            raise RuntimeError("Geometry is not valid.")
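A minimal usage sketch, assuming shapely and numpy are importable and the function above is in scope; a flat [lat, lon, ...] list becomes a lon/lat polygon because swap defaults to True:

coords = [0, 0, 0, 1, 1, 1, 1, 0]     # four lat/lon pairs forming a unit square
poly = _polygon_from_coords(coords)
print(poly.bounds)                    # -> (0.0, 0.0, 1.0, 1.0)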
<SYSTEM_TASK:> Return the path of all granules of a given band. <END_TASK> <USER_TASK:> Description: def granule_paths(self, band_id): """Return the path of all granules of a given band."""
    band_id = str(band_id).zfill(2)
    try:
        assert isinstance(band_id, str)
        assert band_id in BAND_IDS
    except AssertionError:
        raise AttributeError("band ID not valid: %s" % band_id)
    return [granule.band_path(band_id) for granule in self.granules]
<SYSTEM_TASK:> Find and return footprint as Shapely Polygon. <END_TASK> <USER_TASK:> Description: def footprint(self): """Find and return footprint as Shapely Polygon."""
    # Check whether product or granule footprint needs to be calculated.
    # next() builtin instead of iterator.next(), which is Python 2 only.
    tile_geocoding = next(self._metadata.iter("Tile_Geocoding"))
    resolution = 10
    searchstring = ".//*[@resolution='%s']" % resolution
    size, geoposition = tile_geocoding.findall(searchstring)
    nrows, ncols = (int(i.text) for i in size)
    ulx, uly, xdim, ydim = (int(i.text) for i in geoposition)
    lrx = ulx + nrows * resolution
    lry = uly - ncols * resolution
    utm_footprint = box(ulx, lry, lrx, uly)
    project = partial(
        pyproj.transform,
        pyproj.Proj(init=self.srid),
        pyproj.Proj(init='EPSG:4326')
    )
    footprint = transform(project, utm_footprint).buffer(0)
    return footprint
<SYSTEM_TASK:> Return cloudmask as a shapely geometry. <END_TASK> <USER_TASK:> Description: def cloudmask(self): """Return cloudmask as a shapely geometry."""
    polys = list(self._get_mask(mask_type="MSK_CLOUDS"))
    return MultiPolygon([
        poly["geometry"]
        for poly in polys
        if poly["attributes"]["maskType"] == "OPAQUE"
    ]).buffer(0)
<SYSTEM_TASK:> Return paths of given band's jp2 files for all granules. <END_TASK> <USER_TASK:> Description: def band_path(self, band_id, for_gdal=False, absolute=False): """Return paths of given band's jp2 files for all granules."""
    band_id = str(band_id).zfill(2)
    if not isinstance(band_id, str) or band_id not in BAND_IDS:
        raise ValueError("band ID not valid: %s" % band_id)
    if self.dataset.is_zip and for_gdal:
        zip_prefix = "/vsizip/"
        if absolute:
            granule_basepath = zip_prefix + os.path.dirname(os.path.join(
                self.dataset.path, self.dataset.product_metadata_path
            ))
        else:
            granule_basepath = zip_prefix + os.path.dirname(
                self.dataset.product_metadata_path
            )
    else:
        if absolute:
            granule_basepath = os.path.dirname(os.path.join(
                self.dataset.path, self.dataset.product_metadata_path
            ))
        else:
            granule_basepath = os.path.dirname(
                self.dataset.product_metadata_path
            )
    # next() builtin instead of iterator.next(), which is Python 2 only
    product_org = next(self.dataset._product_metadata.iter("Product_Organisation"))
    granule_item = [
        g
        for g in chain(*[gl for gl in product_org.iter("Granule_List")])
        if self.granule_identifier == g.attrib["granuleIdentifier"]
    ]
    if len(granule_item) != 1:
        raise S2ReaderMetadataError(
            "Granule ID cannot be found in product metadata."
        )
    rel_path = [
        f.text for f in granule_item[0].iter() if f.text[-2:] == band_id
    ]
    if len(rel_path) != 1:
        # Apparently some SAFE files don't contain all bands. In such a
        # case, raise a warning and return None.
        warnings.warn(
            "%s: image path to band %s could not be extracted" % (
                self.dataset.path, band_id
            )
        )
        return
    img_path = os.path.join(granule_basepath, rel_path[0]) + ".jp2"
    # Above solution still fails on the "safe" test dataset. Therefore,
    # the path gets checked if it contains the IMG_DATA folder and if not,
    # try to guess the path from the old schema. Not happy with this but
    # couldn't find a better way yet.
    if "IMG_DATA" in img_path:
        return img_path
    else:
        if self.dataset.is_zip:
            zip_prefix = "/vsizip/"
            granule_basepath = zip_prefix + os.path.join(
                self.dataset.path, self.granule_path)
        else:
            granule_basepath = self.granule_path
        return os.path.join(
            os.path.join(granule_basepath, "IMG_DATA"),
            "".join([
                "_".join((self.granule_identifier).split("_")[:-1]),
                "_B", band_id, ".jp2"
            ])
        )
<SYSTEM_TASK:> Transform from geo coordinates to pixel coordinates <END_TASK> <USER_TASK:> Description: def geo_to_pixel(geo, level): """Transform from geo coordinates to pixel coordinates"""
    lat, lon = float(geo[0]), float(geo[1])
    lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
    lon = TileSystem.clip(lon, TileSystem.LONGITUDE_RANGE)
    x = (lon + 180) / 360
    sin_lat = sin(lat * pi / 180)
    y = 0.5 - log((1 + sin_lat) / (1 - sin_lat)) / (4 * pi)
    # might need to cast to uint
    map_size = TileSystem.map_size(level)
    pixel_x = int(TileSystem.clip(x * map_size + 0.5, (0, map_size - 1)))
    pixel_y = int(TileSystem.clip(y * map_size + 0.5, (0, map_size - 1)))
    return pixel_x, pixel_y
<SYSTEM_TASK:> Transform from pixel to geo coordinates <END_TASK> <USER_TASK:> Description: def pixel_to_geo(pixel, level): """Transform from pixel to geo coordinates"""
    pixel_x = pixel[0]
    pixel_y = pixel[1]
    map_size = float(TileSystem.map_size(level))
    x = (TileSystem.clip(pixel_x, (0, map_size - 1)) / map_size) - 0.5
    y = 0.5 - (TileSystem.clip(pixel_y, (0, map_size - 1)) / map_size)
    lat = 90 - 360 * atan(exp(-y * 2 * pi)) / pi
    lon = 360 * x
    return round(lat, 6), round(lon, 6)
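The two transforms above are inverses of the standard Web Mercator mapping. A self-contained round trip, assuming 256-pixel tiles (map_size = 256 << level) as in the Bing tile system:

from math import sin, log, pi, atan, exp

level = 10
map_size = 256 << level
lat, lon = 47.6062, -122.3321            # Seattle
x = (lon + 180) / 360
sin_lat = sin(lat * pi / 180)
y = 0.5 - log((1 + sin_lat) / (1 - sin_lat)) / (4 * pi)
pixel_x, pixel_y = int(x * map_size + 0.5), int(y * map_size + 0.5)

# and back again (clipping omitted for brevity)
x2 = pixel_x / map_size - 0.5
y2 = 0.5 - pixel_y / map_size
print(90 - 360 * atan(exp(-y2 * 2 * pi)) / pi, 360 * x2)  # ~47.606, ~-122.332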
<SYSTEM_TASK:> Transform tile coordinates to a quadkey <END_TASK> <USER_TASK:> Description: def tile_to_quadkey(tile, level): """Transform tile coordinates to a quadkey"""
    tile_x = tile[0]
    tile_y = tile[1]
    quadkey = ""
    # range() and != 0: xrange and identity comparison with 0 were Python 2 idioms
    for i in range(level):
        bit = level - i
        digit = ord('0')
        mask = 1 << (bit - 1)
        if (tile_x & mask) != 0:
            digit += 1
        if (tile_y & mask) != 0:
            digit += 2
        quadkey += chr(digit)
    return quadkey
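Worked example of the bit interleaving as a standalone function; tile (3, 5) at level 3 yields "213", matching the Bing Maps tile-system documentation:

def tile_to_quadkey(tile_x, tile_y, level):
    quadkey = ""
    for i in range(level):
        mask = 1 << (level - i - 1)
        digit = ord('0')
        if tile_x & mask:   # x bit selects +1
            digit += 1
        if tile_y & mask:   # y bit selects +2
            digit += 2
        quadkey += chr(digit)
    return quadkey

print(tile_to_quadkey(3, 5, 3))  # -> '213'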
<SYSTEM_TASK:> return user authorize url <END_TASK> <USER_TASK:> Description: def authorize_url(self, state=''): """ return user authorize url """
    url = 'https://openapi.youku.com/v2/oauth2/authorize?'
    params = {
        'client_id': self.client_id,
        'response_type': 'code',
        'state': state,
        'redirect_uri': self.redirect_uri
    }
    return url + urlencode(params)
<SYSTEM_TASK:> remove item from dict if value is None. <END_TASK> <USER_TASK:> Description: def remove_none_value(data): """remove item from dict if value is None. return new dict. """
return dict((k, v) for k, v in data.items() if v is not None)
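Typical use is stripping unset optional parameters before sending a request, e.g.:

params = {'title': 'demo', 'category': None, 'tags': 'Others'}
print(remove_none_value(params))  # -> {'title': 'demo', 'tags': 'Others'}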
<SYSTEM_TASK:> A function which attempts to download and uncompress the latest version of an openfMRI dataset. <END_TASK> <USER_TASK:> Description: def get_dataset(ds,dataDir,removecompressed=1): """ A function which attempts to download and uncompress the latest version of an openfMRI dataset. PARAMETERS :ds: dataset number of the openfMRI.org dataset (integer) without zero padding. I.e. can just be 212 (doesn't need to be 000212). :dataDir: where to save the data. Will get saved in 'dataDir/openfmri/ds000XXX' :removecompressed: delete compressed data once unzipped. 1=yes. 0=no. NOTES There is no "default" way to download data from openfMRI so this solution is a little hacky. It may not be a universal function and it is best to verify that all necessary data has been downloaded. """
    # Convert input ds to string in case it is put in via function
    ds = str(ds)
    # The final character of the dataset can be a letter
    lettersuffix = ''
    if re.search('[A-Za-z]$', ds):
        lettersuffix = ds[-1]
        ds = ds[:-1]
    openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix
    # Some datasets include
    try:
        os.mkdir(dataDir)
    except:
        pass
    datasetDir = os.path.join(dataDir, 'openfmri/')
    try:
        os.mkdir(datasetDir)
    except:
        pass
    openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'
    r = urlopen(openfMRI_url).read()
    soup = BeautifulSoup(r, 'lxml')
    # Isolate only the links from the latest revision. The text "data associated with revision".
    # If the website changes its static text, this needs to be changed
    unformatted_soup = soup.prettify()
    firstOccurance = unformatted_soup.find('Data Associated with Revision')
    secondOccurancce = unformatted_soup[firstOccurance + 1:].find('Data Associated with Revision')
    # If there is only one "Data Associated..." (i.e. only one revision) this returns -1.
    # This should be kept. Otherwise add on the firstOccurance index
    if secondOccurancce != -1:
        secondOccurancce += firstOccurance
    # The latest links are confined within this part of the text
    soup_latestversion = BeautifulSoup(unformatted_soup[firstOccurance:secondOccurancce], 'lxml')
    # Loop through all links and download files
    filelist = []
    for a in soup_latestversion.find_all('a', href=True):
        # This assumes that all files include ds....
        if re.search('ds[A-Za-z_0-9.-]*$', a['href']):
            filename_start = re.search('ds[A-Za-z_0-9.-]*$', a['href']).start()
            filelist.append(a['href'][filename_start:])
            print('Downloading: ' + a['href'][filename_start:])
            urlretrieve(a['href'], datasetDir + a['href'][filename_start:])
    print('--- Download complete ---')
    for f in filelist:
        untar_or_unzip(datasetDir, f)
    print('--- Uncompressing complete ---')
    if removecompressed == 1:
        for f in filelist:
            print('Clean up. Deleting: ' + f)
            os.remove(datasetDir + f)
        print('--- Clean up complete ---')
    print('NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.')
    print('--- Script complete ---')
<SYSTEM_TASK:> util method for create video params to upload. <END_TASK> <USER_TASK:> Description: def prepare_video_params(self, title=None, tags='Others', description='', copyright_type='original', public_type='all', category=None, watch_password=None, latitude=None, longitude=None, shoot_time=None ): """ util method for creating video params to upload. Only need to provide a minimum of two essential parameters: title and tags, other video params are optional. For the full params spec, see: http://cloud.youku.com/docs?id=110#create . Args: title: string, 2-50 characters. tags: string, 1-10 tags joined with comma. description: string, less than 2000 characters. copyright_type: string, 'original' or 'reproduced' public_type: string, 'all' or 'friend' or 'password' watch_password: string, if public_type is password. latitude: double. longitude: double. shoot_time: datetime. Returns: dict params that upload/create method need. """
    params = {}
    if title is None:
        title = self.file_name
    elif len(title) > 80:
        title = title[:80]
    if len(description) > 2000:
        description = description[0:2000]
    params['title'] = title
    params['tags'] = tags
    params['description'] = description
    params['copyright_type'] = copyright_type
    params['public_type'] = public_type
    if category:
        params['category'] = category
    if watch_password:
        params['watch_password'] = watch_password
    if latitude:
        params['latitude'] = latitude
    if longitude:
        params['longitude'] = longitude
    if shoot_time:
        params['shoot_time'] = shoot_time
    return params
<SYSTEM_TASK:> if create and create_file have executed, save upload state <END_TASK> <USER_TASK:> Description: def _save_upload_state_to_file(self): """if create and create_file have executed, save upload state to file so the upload can be resumed if the current process is interrupted. """
    if os.access(self.file_dir, os.W_OK | os.R_OK | os.X_OK):
        save_file = self.file + '.upload'
        data = {
            'upload_token': self.upload_token,
            'upload_server_ip': self.upload_server_ip
        }
        with open(save_file, 'w') as f:
            json.dump(data, f)
<SYSTEM_TASK:> start uploading the file until upload is complete or error. <END_TASK> <USER_TASK:> Description: def upload(self, params={}): """start uploading the file until upload is complete or error. This is the main method to use if you do not care about the state of the process. Args: params: a dict object describing video info, e.g. title, tags, description, category; for all video params see the doc of prepare_video_params. Returns: video_id if upload succeeds """
    if self.upload_token is not None:
        # resume upload
        status = self.check()
        if status['status'] != 4:
            return self.commit()
        else:
            self.new_slice()
            while self.slice_task_id != 0:
                self.upload_slice()
            return self.commit()
    else:
        # new upload
        self.create(self.prepare_video_params(**params))
        self.create_file()
        self.new_slice()
        while self.slice_task_id != 0:
            self.upload_slice()
        return self.commit()
<SYSTEM_TASK:> Synchronize LDAP users with local user model. <END_TASK> <USER_TASK:> Description: def sync_users(self): """Synchronize LDAP users with local user model."""
    if self.settings.USER_FILTER:
        # list() around keys() so the concatenation also works on Python 3 dict views
        user_attributes = list(self.settings.USER_ATTRIBUTES.keys()) + self.settings.USER_EXTRA_ATTRIBUTES
        ldap_users = self.ldap.search(self.settings.USER_FILTER, user_attributes)
        self._sync_ldap_users(ldap_users)
        logger.info("Users are synchronized")
<SYSTEM_TASK:> If node is ancestor of self <END_TASK> <USER_TASK:> Description: def is_ancestor(self, node): """ If node is ancestor of self Get the difference in level If not, None """
    if self.level <= node.level or self.key[:len(node.key)] != node.key:
        return None
    return self.level - node.level
<SYSTEM_TASK:> Generator <END_TASK> <USER_TASK:> Description: def xdifference(self, to): """ Generator that yields the quadkeys lying between self and to. Implemented as a generator in case it is used at a low level. Only works with quadkeys of the same level. """
    x, y = 0, 1
    assert self.level == to.level
    self_tile = list(self.to_tile()[0])
    to_tile = list(to.to_tile()[0])
    # fixed: the y comparison originally read `self_tile[y] <= self_tile[y]`,
    # which is always true; it must compare against `to_tile`
    if self_tile[x] >= to_tile[x] and self_tile[y] <= to_tile[y]:
        ne_tile, sw_tile = self_tile, to_tile
    else:
        sw_tile, ne_tile = self_tile, to_tile
    cur = ne_tile[:]
    while cur[x] >= sw_tile[x]:
        while cur[y] <= sw_tile[y]:
            yield from_tile(tuple(cur), self.level)
            cur[y] += 1
        cur[x] -= 1
        cur[y] = ne_tile[y]
<SYSTEM_TASK:> Get a list of all ancestors in descending order of level, including a new instance of self <END_TASK> <USER_TASK:> Description: def unwind(self): """ Get a list of all ancestors in descending order of level, including a new instance of self """
return [ QuadKey(self.key[:l+1]) for l in reversed(range(len(self.key))) ]
<SYSTEM_TASK:> Apply validation rules for loaded settings. <END_TASK> <USER_TASK:> Description: def validate(self): """Apply validation rules for loaded settings."""
    if self.GROUP_ATTRIBUTES and self.GROUPNAME_FIELD not in self.GROUP_ATTRIBUTES.values():
        raise ImproperlyConfigured("LDAP_SYNC_GROUP_ATTRIBUTES must contain '%s'" % self.GROUPNAME_FIELD)

    if not self.model._meta.get_field(self.USERNAME_FIELD).unique:
        raise ImproperlyConfigured("LDAP_SYNC_USERNAME_FIELD '%s' must be unique" % self.USERNAME_FIELD)

    if self.USER_ATTRIBUTES and self.USERNAME_FIELD not in self.USER_ATTRIBUTES.values():
        raise ImproperlyConfigured("LDAP_SYNC_USER_ATTRIBUTES must contain '%s'" % self.USERNAME_FIELD)
<SYSTEM_TASK:> Query the configured LDAP server. <END_TASK> <USER_TASK:> Description: def search(self, filterstr, attrlist): """Query the configured LDAP server."""
return self._paged_search_ext_s(self.settings.BASE, ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=attrlist, page_size=self.settings.PAGE_SIZE)
<SYSTEM_TASK:> Fetch a location's corresponding LID. <END_TASK> <USER_TASK:> Description: def fetch_lid(self, woeid): """Fetch a location's corresponding LID. Args: woeid: (string) the location's WOEID. Returns: a string containing the requested LID or None if the LID could not be found. Raises: urllib.error.URLError: urllib.request could not open the URL (Python 3). urllib2.URLError: urllib2 could not open the URL (Python 2). xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to parse the XML document. """
    rss = self._fetch_xml(LID_LOOKUP_URL.format(woeid, "f"))

    # We are pulling the LID from the permalink tag in the XML file
    # returned by Yahoo.
    try:
        link = rss.find("channel/link").text
    except AttributeError:
        return None

    # use regex or string.split
    # regex assumes the format XXXXNNNN for the LID.
    # string.split is more general for this context.
    lid = re.search("[A-Za-z]{4}[0-9]{4}", link).group()
    # lid = link.split("/forecast/")[1].split("_")[0]

    return lid
<SYSTEM_TASK:> Fetch a location's corresponding WOEID. <END_TASK> <USER_TASK:> Description: def fetch_woeid(self, location): """Fetch a location's corresponding WOEID. Args: location: (string) a location (e.g. 23454 or Berlin, Germany). Returns: a string containing the location's corresponding WOEID or None if the WOEID could not be found. Raises: urllib.error.URLError: urllib.request could not open the URL (Python 3). urllib2.URLError: urllib2 could not open the URL (Python 2). xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to parse the XML document. """
    rss = self._fetch_xml(WOEID_LOOKUP_URL.format(quote(location)))

    try:
        woeid = rss.find("results/Result/woeid").text
    except AttributeError:
        return None

    return woeid
<SYSTEM_TASK:> Convert wind direction from degrees to compass direction. <END_TASK> <USER_TASK:> Description: def _degrees_to_direction(self, degrees): """Convert wind direction from degrees to compass direction."""
    try:
        degrees = float(degrees)
    except ValueError:
        return None

    if degrees < 0 or degrees > 360:
        return None

    if degrees <= 11.25 or degrees > 348.75:  # was ">= 348.76", which left values in (348.75, 348.76) unmapped
        return "N"
    elif degrees <= 33.75:
        return "NNE"
    elif degrees <= 56.25:
        return "NE"
    elif degrees <= 78.75:
        return "ENE"
    elif degrees <= 101.25:
        return "E"
    elif degrees <= 123.75:
        return "ESE"
    elif degrees <= 146.25:
        return "SE"
    elif degrees <= 168.75:
        return "SSE"
    elif degrees <= 191.25:
        return "S"
    elif degrees <= 213.75:
        return "SSW"
    elif degrees <= 236.25:
        return "SW"
    elif degrees <= 258.75:
        return "WSW"
    elif degrees <= 281.25:
        return "W"
    elif degrees <= 303.75:
        return "WNW"
    elif degrees <= 326.25:
        return "NW"
    elif degrees <= 348.75:
        return "NNW"
    else:
        return None
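The same mapping can be written table-driven, since each compass sector spans 22.5 degrees centered on its direction (exact sector edges land one sector lower than the inclusive chain above):

DIRS = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE",
        "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]

def degrees_to_direction(degrees):
    if not 0 <= degrees <= 360:
        return None
    # shift by half a sector, divide by sector width, wrap 360 back to N
    return DIRS[int((degrees + 11.25) // 22.5) % 16]

print(degrees_to_direction(350.0))  # -> 'N'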
<SYSTEM_TASK:> Fetch a url and parse the document's XML. <END_TASK> <USER_TASK:> Description: def _fetch_xml(self, url): """Fetch a url and parse the document's XML."""
    with contextlib.closing(urlopen(url)) as f:
        return xml.etree.ElementTree.parse(f).getroot()
<SYSTEM_TASK:> Get a specific canned value <END_TASK> <USER_TASK:> Description: def show_value(self, value): """ Get a specific canned value :type value: str :param value: Canned value to show :rtype: dict :return: A dictionary containing canned value description """
    values = self.get_values()
    values = [x for x in values if x['label'] == value]

    if len(values) == 0:
        raise Exception("Unknown value")
    else:
        return values[0]
<SYSTEM_TASK:> Get a specific canned key <END_TASK> <USER_TASK:> Description: def show_key(self, value): """ Get a specific canned key :type value: str :param value: Canned key to show :rtype: dict :return: A dictionary containing canned key description """
    keys = self.get_keys()
    keys = [x for x in keys if x['label'] == value]

    if len(keys) == 0:
        raise Exception("Unknown key")
    else:
        return keys[0]
<SYSTEM_TASK:> Download the original image. <END_TASK> <USER_TASK:> Description: def download(self, dest_pattern="{originalFilename}", override=True, parent=False): """ Download the original image. Parameters ---------- dest_pattern : str, optional Destination path for the downloaded image. "{X}" patterns are replaced by the value of X attribute if it exists. override : bool, optional True if a file with same name can be overridden by the new file. parent : bool, optional True to download image parent if the image is a part of a multidimensional file. Returns ------- downloaded : bool True if everything happens correctly, False otherwise. """
    if self.id is None:
        raise ValueError("Cannot download image with no ID.")

    pattern = re.compile("{(.*?)}")
    dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), dest_pattern)

    parameters = {"parent": parent}

    destination = os.path.dirname(dest_pattern)
    if not os.path.exists(destination):
        os.makedirs(destination)

    return Cytomine.get_instance().download_file("{}/{}/download".format(self.callback_identifier, self.id),
                                                 dest_pattern, override, parameters)
<SYSTEM_TASK:> Download the image with optional image modifications. <END_TASK> <USER_TASK:> Description: def dump(self, dest_pattern="{id}.jpg", override=True, max_size=None, bits=8, contrast=None, gamma=None, colormap=None, inverse=None): """ Download the image with optional image modifications. Parameters ---------- dest_pattern : str, optional Destination path for the downloaded image. "{X}" patterns are replaced by the value of X attribute if it exists. override : bool, optional True if a file with same name can be overridden by the new file. max_size : int, tuple, optional Maximum size (width or height) of returned image. None to get original size. bits : int (8,16,32) or str ("max"), optional Bit depth (bit per channel) of returned image. "max" returns the original image bit depth contrast : float, optional Optional contrast applied on returned image. gamma : float, optional Optional gamma applied on returned image. colormap : int, optional Cytomine identifier of a colormap to apply on returned image. inverse : bool, optional True to inverse color mapping, False otherwise. Returns ------- downloaded : bool True if everything happens correctly, False otherwise. As a side effect, object attribute "filename" is filled with downloaded file path. """
    if self.id is None:
        raise ValueError("Cannot dump an annotation with no ID.")

    pattern = re.compile("{(.*?)}")
    dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), dest_pattern)

    destination = os.path.dirname(dest_pattern)
    filename, extension = os.path.splitext(os.path.basename(dest_pattern))
    extension = extension[1:]

    if extension not in ("jpg", "png", "tif", "tiff"):
        extension = "jpg"

    if not os.path.exists(destination):
        os.makedirs(destination)

    if isinstance(max_size, tuple) or max_size is None:
        max_size = max(self.width, self.height)

    parameters = {
        "maxSize": max_size,
        "contrast": contrast,
        "gamma": gamma,
        "colormap": colormap,
        "inverse": inverse,
        "bits": bits
    }

    file_path = os.path.join(destination, "{}.{}".format(filename, extension))

    url = self.preview[:self.preview.index("?")]
    url = url.replace(".png", ".{}".format(extension))
    result = Cytomine.get_instance().download_file(url, file_path, override, parameters)
    if result:
        self.filename = file_path
    return result
<SYSTEM_TASK:> Returns path to folder holding generated artifact for given element. <END_TASK> <USER_TASK:> Description: def folder_path_for_package(cls, package: ecore.EPackage): """Returns path to folder holding generated artifact for given element."""
    parent = package.eContainer()
    if parent:
        return os.path.join(cls.folder_path_for_package(parent), package.name)
    return package.name
<SYSTEM_TASK:> Determines which classifiers have to be imported into given package. <END_TASK> <USER_TASK:> Description: def imported_classifiers_package(p: ecore.EPackage): """Determines which classifiers have to be imported into given package."""
    classes = {c for c in p.eClassifiers if isinstance(c, ecore.EClass)}

    references = itertools.chain(*(c.eAllReferences() for c in classes))
    references_types = (r.eType for r in references)
    imported = {c for c in references_types if getattr(c, 'ePackage', p) is not p}

    imported_dict = {}
    for classifier in imported:
        imported_dict.setdefault(classifier.ePackage, set()).add(classifier)

    return imported_dict
<SYSTEM_TASK:> Determines which classifiers have to be imported into given module. <END_TASK> <USER_TASK:> Description: def imported_classifiers(p: ecore.EPackage): """Determines which classifiers have to be imported into given module."""
    classes = {c for c in p.eClassifiers if isinstance(c, ecore.EClass)}

    supertypes = itertools.chain(*(c.eAllSuperTypes() for c in classes))
    imported = {c for c in supertypes if c.ePackage is not p}

    attributes = itertools.chain(*(c.eAttributes for c in classes))
    attributes_types = (a.eType for a in attributes)
    imported |= {t for t in attributes_types if t.ePackage not in {p, ecore.eClass, None}}

    imported_dict = {}
    for classifier in imported:
        imported_dict.setdefault(classifier.ePackage, set()).add(classifier)

    return imported_dict
<SYSTEM_TASK:> Returns classes in package ordered by number of bases. <END_TASK> <USER_TASK:> Description: def classes(p: ecore.EPackage): """Returns classes in package ordered by number of bases."""
    classes = (c for c in p.eClassifiers if isinstance(c, ecore.EClass))
    return sorted(classes, key=lambda c: len(set(c.eAllSuperTypes())))
<SYSTEM_TASK:> Returns Python form of fully qualified name. <END_TASK> <USER_TASK:> Description: def filter_pyfqn(cls, value, relative_to=0): """ Returns Python form of fully qualified name. Args: relative_to: If greater 0, the returned path is relative to the first n directories. """
    def collect_packages(element, packages):
        parent = element.eContainer()
        if parent:
            collect_packages(parent, packages)
        packages.append(element.name)

    packages = []
    collect_packages(value, packages)

    if relative_to < 0 or relative_to > len(packages):
        raise ValueError('relative_to not in range of number of packages')

    fqn = '.'.join(packages[relative_to:])

    if relative_to:
        fqn = '.' + fqn

    return cls.module_path_map.get(fqn, fqn)
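For example, with nested EPackages root -> sub, the collected names join to 'root.sub'; a sketch assuming pyecore is installed:

from pyecore.ecore import EPackage

root, sub = EPackage(name='root'), EPackage(name='sub')
root.eSubpackages.append(sub)
# collect_packages walks eContainer() upward, so filter_pyfqn(sub) -> 'root.sub'
# and filter_pyfqn(sub, relative_to=1) -> '.sub' (a relative module path)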
<SYSTEM_TASK:> Return a new Jinja environment. <END_TASK> <USER_TASK:> Description: def create_environment(self, **kwargs): """ Return a new Jinja environment. Derived classes may override method to pass additional parameters or to change the template loader type. """
    environment = super().create_environment(**kwargs)
    environment.tests.update({
        'type': self.test_type,
        'kind': self.test_kind,
        'opposite_before_self': self.test_opposite_before_self,
    })
    environment.filters.update({
        'docstringline': self.filter_docstringline,
        'pyquotesingle': self.filter_pyquotesingle,
        'derivedname': self.filter_derived_name,
        'refqualifiers': self.filter_refqualifiers,
        'attrqualifiers': self.filter_attrqualifiers,
        'supertypes': self.filter_supertypes,
        'all_contents': self.filter_all_contents,
        'pyfqn': self.filter_pyfqn,
        're_sub': lambda v, p, r: re.sub(p, r, v),
        'set': self.filter_set,
    })

    from pyecore import ecore
    environment.globals.update({'ecore': ecore})

    return environment
<SYSTEM_TASK:> Generate model code. <END_TASK> <USER_TASK:> Description: def generate(self, model, outfolder, *, exclude=None): """ Generate model code. Args: model: The meta-model to generate code for. outfolder: Path to the directory that will contain the generated code. exclude: List of referenced resources for which code was already generated (to prevent regeneration). """
    with pythonic_names():
        super().generate(model, outfolder)

    check_dependency = self.with_dependencies and model.eResource
    if check_dependency:
        if exclude is None:
            exclude = set()
        resource = model.eResource
        # the current resource had been managed and is excluded from further generations
        exclude.add(resource)
        rset = resource.resource_set
        direct_resources = {r for r in rset.resources.values() if r not in exclude}
        for resource in direct_resources:
            self.generate(resource.contents[0], outfolder, exclude=exclude)
<SYSTEM_TASK:> Get information about a group <END_TASK> <USER_TASK:> Description: def show_group(self, group_id): """ Get information about a group :type group_id: int :param group_id: Group ID Number :rtype: dict :return: a dictionary containing group information """
    res = self.post('loadGroups', {'groupId': group_id})
    if isinstance(res, list):
        return _fix_group(res[0])
    else:
        return _fix_group(res)
<SYSTEM_TASK:> Get the group's organism permissions <END_TASK> <USER_TASK:> Description: def get_organism_permissions(self, group): """ Get the group's organism permissions :type group: str :param group: group name :rtype: list :return: a list containing organism permissions (if any) """
    data = {
        'name': group,
    }
    response = _fix_group(self.post('getOrganismPermissionsForGroup', data))
    return response
<SYSTEM_TASK:> Get the group's admins <END_TASK> <USER_TASK:> Description: def get_group_admin(self, group): """ Get the group's admins :type group: str :param group: group name :rtype: list :return: a list containing group admins """
    data = {
        'name': group,
    }
    response = _fix_group(self.post('getGroupAdmin', data))
    return response
<SYSTEM_TASK:> Get the group's creator <END_TASK> <USER_TASK:> Description: def get_group_creator(self, group): """ Get the group's creator :type group: str :param group: group name :rtype: list :return: creator userId """
    data = {
        'name': group,
    }
    response = _fix_group(self.post('getGroupCreator', data))
    return response
<SYSTEM_TASK:> Add an attribute to a feature <END_TASK> <USER_TASK:> Description: def add_attribute(self, feature_id, attribute_key, attribute_value, organism=None, sequence=None): """ Add an attribute to a feature :type feature_id: str :param feature_id: Feature UUID :type attribute_key: str :param attribute_key: Attribute Key :type attribute_value: str :param attribute_value: Attribute Value :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name This seems to show two attributes being added, but it behaves like those two are one. :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """
    data = {
        'features': [
            {
                'uniquename': feature_id,
                'non_reserved_properties': [
                    {
                        'tag': attribute_key,
                        'value': attribute_value,
                    }
                ]
            }
        ]
    }
    data = self._update_data(data, organism, sequence)
    return self.post('addAttribute', data)
<SYSTEM_TASK:> Add a dbxref to a feature <END_TASK> <USER_TASK:> Description: def add_dbxref(self, feature_id, db, accession, organism=None, sequence=None): """ Add a dbxref to a feature :type feature_id: str :param feature_id: Feature UUID :type db: str :param db: DB Name (e.g. PMID) :type accession: str :param accession: Accession Value :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name This seems to show two attributes being added, but it behaves like those two are one. :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """
    data = {
        'features': [
            {
                'uniquename': feature_id,
                'dbxrefs': [
                    {
                        'db': db,
                        'accession': accession,
                    }
                ]
            }
        ]
    }
    data = self._update_data(data, organism, sequence)
    return self.post('addDbxref', data)
<SYSTEM_TASK:> Apollo likes to return empty user arrays, even when you REALLY <END_TASK> <USER_TASK:> Description: def _handle_empty(self, user, response): """Apollo likes to return empty user arrays, even when you REALLY want a user response back... like creating a user."""
    if len(response.keys()) == 0:
        response = self.show_user(user)

    # And sometimes show_user can return nothing. Ask again...
    if len(response) == 0:
        response = self.show_user(user)

    return response
<SYSTEM_TASK:> Get a specific user <END_TASK> <USER_TASK:> Description: def show_user(self, user): """ Get a specific user :type user: str :param user: User Email :rtype: dict :return: a dictionary containing user information """
    res = self.post('loadUsers', {'userId': user})
    if isinstance(res, list) and len(res) > 0:
        res = res[0]
    return _fix_user(res)
<SYSTEM_TASK:> Require that the user has an account <END_TASK> <USER_TASK:> Description: def require_user(wa, email): """Require that the user has an account"""
    cache_key = 'user-list'
    try:
        # Get the cached value
        data = userCache[cache_key]
    except KeyError:
        # If we hit a key error above, indicating that
        # we couldn't find the key, we'll simply re-request
        # the data
        data = wa.users.loadUsers()
        userCache[cache_key] = data

    return AssertUser([x for x in data if x.username == email])
<SYSTEM_TASK:> Get the list of organisms accessible to a user, filtered by `orgs` <END_TASK> <USER_TASK:> Description: def accessible_organisms(user, orgs): """Get the list of organisms accessible to a user, filtered by `orgs`"""
    permission_map = {
        x['organism']: x['permissions']
        for x in user.organismPermissions
        if 'WRITE' in x['permissions'] or
        'READ' in x['permissions'] or
        'ADMINISTRATE' in x['permissions'] or
        user.role == 'ADMIN'
    }

    if 'error' in orgs:
        raise Exception("Error received from Apollo server: \"%s\"" % orgs['error'])

    return [
        (org['commonName'], org['id'], False)
        for org in sorted(orgs, key=lambda x: x['commonName'])
        if org['commonName'] in permission_map
    ]
<SYSTEM_TASK:> Connect the client with the given host and the provided credentials. <END_TASK> <USER_TASK:> Description: def connect(cls, host, public_key, private_key, verbose=0, use_cache=True): """ Connect the client with the given host and the provided credentials. Parameters ---------- host : str The Cytomine host (without protocol). public_key : str The Cytomine public key. private_key : str The Cytomine private key. verbose : int The verbosity level of the client. use_cache : bool True to use HTTP cache, False otherwise. Returns ------- client : Cytomine A connected Cytomine client. """
return cls(host, public_key, private_key, verbose, use_cache)
<SYSTEM_TASK:> Connect with data taken from a command line interface. <END_TASK> <USER_TASK:> Description: def connect_from_cli(cls, argv, use_cache=True): """ Connect with data taken from a command line interface. Parameters ---------- argv: list Command line parameters (executable name excluded) use_cache : bool True to use HTTP cache, False otherwise. Returns ------- client : Cytomine A connected Cytomine client. Notes ----- If some parameters are invalid, the function stops the execution and displays an help. """
    argparse = cls._add_cytomine_cli_args(ArgumentParser())
    params, _ = argparse.parse_known_args(args=argv)
    log_level = params.verbose
    if params.log_level is not None:
        log_level = logging.getLevelName(params.log_level)
    return cls.connect(params.host, params.public_key, params.private_key, log_level, use_cache=use_cache)
<SYSTEM_TASK:> Process the provided host and protocol to return them in a standardized <END_TASK> <USER_TASK:> Description: def _parse_url(host, provided_protocol=None): """ Process the provided host and protocol to return them in a standardized way that can be subsequently used by Cytomine methods. If the protocol is not specified, HTTP is the default. Only HTTP and HTTPS schemes are supported. Parameters ---------- host: str The host, with or without the protocol provided_protocol: str ("http", "http://", "https", "https://") The default protocol - used only if the host value does not specify one Return ------ (host, protocol): tuple The host and protocol in a standardized way (host without protocol, and protocol in ("http", "https")) Examples -------- >>> Cytomine._parse_url("localhost-core") ("localhost-core", "http") >>> Cytomine._parse_url("https://demo.cytomine.coop", "http") ("demo.cytomine.coop", "https") """
protocol = "http" # default protocol if host.startswith("http://"): protocol = "http" elif host.startswith("https://"): protocol = "https" elif provided_protocol is not None: provided_protocol = provided_protocol.replace("://", "") if provided_protocol in ("http", "https"): protocol = provided_protocol host = host.replace("http://", "").replace("https://", "") if host.endswith("/"): host = host[:-1] return host, protocol
<SYSTEM_TASK:> Upload the crop associated with an annotation as a new image. <END_TASK> <USER_TASK:> Description: def upload_crop(self, ims_host, filename, id_annot, id_storage, id_project=None, sync=False, protocol=None): """ Upload the crop associated with an annotation as a new image. Parameters ---------- ims_host: str Cytomine IMS host, with or without the protocol filename: str Filename to give to the newly created image id_annot: int Identifier of the annotation to crop id_storage: int Identifier of the storage to use to upload the new image id_project: int, optional Identifier of a project in which the new image should be added sync: bool, optional True: the server will answer once the uploaded file is deployed (response will include the created image) False (default): the server will answer as soon as it receives the file protocol: str ("http", "http://", "https", "https://") The default protocol - used only if the host value does not specify one Return ------ uf: UploadedFile The uploaded file. Its images attribute is populated with the collection of created abstract images. """
    if not protocol:
        protocol = self._protocol
    ims_host, protocol = self._parse_url(ims_host, protocol)
    ims_host = "{}://{}".format(protocol, ims_host)

    query_parameters = {
        "annotation": id_annot,
        "storage": id_storage,
        "cytomine": "{}://{}".format(self._protocol, self._host),
        "name": filename,
        "sync": sync
    }

    if id_project:
        query_parameters["project"] = id_project

    response = self._session.post("{}/uploadCrop".format(ims_host),
                                  auth=CytomineAuth(
                                      self._public_key, self._private_key,
                                      ims_host, ""),
                                  headers=self._headers(),
                                  params=query_parameters)

    if response.status_code == requests.codes.ok:
        uf = self._process_upload_response(response.json())
        self._logger.info("Image crop uploaded successfully to {}".format(ims_host))
        return uf
    else:
        self._logger.error("Error during crop upload. Response: %s", response)
        return False
<SYSTEM_TASK:> Get a specific canned comment <END_TASK> <USER_TASK:> Description: def show_comment(self, value): """ Get a specific canned comment :type value: str :param value: Canned comment to show :rtype: dict :return: A dictionary containing canned comment description """
    comments = self.get_comments()
    comments = [x for x in comments if x['comment'] == value]

    if len(comments) == 0:
        raise Exception("Unknown comment")
    else:
        return comments[0]
<SYSTEM_TASK:> Command line wrappers around Apollo functions. While this sounds <END_TASK> <USER_TASK:> Description: def arrow(ctx, apollo_instance, verbose, log_level): """Command line wrappers around Apollo functions. While this sounds unexciting, with arrow and jq you can easily build powerful command line scripts."""
    set_logging_level(log_level)

    # We abuse this, knowing that calls to one will fail.
    try:
        ctx.gi = get_apollo_instance(apollo_instance)
    except TypeError:
        pass
        # ctx.log("Could not access Galaxy instance configuration")

    ctx.verbose = verbose
<SYSTEM_TASK:> Load json data, allowing - to represent stdin. <END_TASK> <USER_TASK:> Description: def json_loads(data): """Load json data, allowing - to represent stdin."""
    if data is None:
        return ""

    if data == "-":
        return json.load(sys.stdin)
    elif os.path.exists(data):
        with open(data, 'r') as handle:
            return json.load(handle)
    else:
        return json.loads(data)
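Usage sketch of the three branches ('-' reads stdin); the file path in the second call is hypothetical:

print(json_loads('{"organism": "yeast"}'))  # parsed from a JSON literal
# json_loads('payload.json')                # parsed from a file path, if it exists
# json_loads('-')                           # parsed from stdin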
<SYSTEM_TASK:> Filters the list of log objects to display <END_TASK> <USER_TASK:> Description: def get_queryset(self, **options): """ Filters the list of log objects to display """
    days = options.get('days')
    queryset = TimelineLog.objects.order_by('-timestamp')
    if days:
        try:
            start = timezone.now() - timedelta(days=days)
        except TypeError:
            raise CommandError("Incorrect 'days' parameter. 'days' must be a number of days.")
        else:
            return queryset.filter(timestamp__gte=start)
    return queryset
<SYSTEM_TASK:> Figures out the recipients <END_TASK> <USER_TASK:> Description: def get_recipients(self, **options): """ Figures out the recipients """
    if options['recipients_from_setting']:
        return settings.TIMELINE_DIGEST_EMAIL_RECIPIENTS

    users = get_user_model()._default_manager.all()
    if options['staff']:
        users = users.filter(is_staff=True)
    elif not options['all']:
        users = users.filter(is_staff=True, is_superuser=True)
    return users.values_list(settings.TIMELINE_USER_EMAIL_FIELD, flat=True)
<SYSTEM_TASK:> Accepts an array of actions and returns an array of actions which match. <END_TASK> <USER_TASK:> Description: def expand_actions(self, actions): """Accepts an array of actions and returns an array of actions which match. This should be called before "matches?" and other checking methods since they rely on the actions to be expanded."""
    results = list()
    for action in actions:
        if action in self.aliased_actions:
            results.append(action)
            for item in self.expand_actions(self.aliased_actions[action]):
                results.append(item)
        else:
            results.append(action)
    return results
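A standalone sketch of the same recursive expansion with a CanCan-style alias map (the mapping below is hypothetical):

ALIASES = {'read': ['index', 'show']}

def expand(actions):
    results = []
    for action in actions:
        results.append(action)
        if action in ALIASES:
            results.extend(expand(ALIASES[action]))  # aliases may nest, so recurse
    return results

print(expand(['read', 'edit']))  # -> ['read', 'index', 'show', 'edit']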
<SYSTEM_TASK:> Connect to the Cytomine server and switch to job connection <END_TASK> <USER_TASK:> Description: def start(self): """ Connect to the Cytomine server and switch to job connection Incurs dataflows """
    run_by_ui = False
    if not self.current_user.algo:
        # If user connects as a human (CLI execution)
        self._job = Job(self._project.id, self._software.id).save()
        user_job = User().fetch(self._job.userJob)
        self.set_credentials(user_job.publicKey, user_job.privateKey)
    else:
        # If the user executes the job through the Cytomine interface
        self._job = Job().fetch(self.current_user.job)
        run_by_ui = True

    # set job state to RUNNING
    self._job.status = Job.RUNNING
    self._job.update()

    # add software parameters
    if not run_by_ui and self._parameters is not None:
        parameters = vars(self._parameters)
        for software_param in self._software.parameters:
            name = software_param["name"]
            if name in parameters:
                value = parameters[name]
            else:
                value = software_param["defaultParamValue"]

            JobParameter(self._job.id, software_param["id"], value).save()
<SYSTEM_TASK:> Notify the Cytomine server of the job's end <END_TASK> <USER_TASK:> Description: def close(self, value): """ Notify the Cytomine server of the job's end Incurs a dataflow """
    if value is None:
        status = Job.TERMINATED
        status_comment = "Job successfully terminated"
    else:
        status = Job.FAILED
        status_comment = str(value)[:255]

    self._job.status = status
    self._job.statusComment = status_comment
    self._job.update()
<SYSTEM_TASK:> Select the right URI implementation regarding the Ecore model path schema. <END_TASK> <USER_TASK:> Description: def select_uri_implementation(ecore_model_path): """Select the right URI implementation regarding the Ecore model path schema."""
    if URL_PATTERN.match(ecore_model_path):
        return pyecore.resources.resource.HttpURI
    return pyecore.resources.URI
<SYSTEM_TASK:> Load a single Ecore model and return the root package. <END_TASK> <USER_TASK:> Description: def load_model(ecore_model_path): """Load a single Ecore model and return the root package."""
    rset = pyecore.resources.ResourceSet()
    uri_implementation = select_uri_implementation(ecore_model_path)
    resource = rset.get_resource(uri_implementation(ecore_model_path))
    return resource.contents[0]
<SYSTEM_TASK:> Checks if a given user has the ability to perform the action on a subject <END_TASK> <USER_TASK:> Description: def can(user, action, subject): """Checks if a given user has the ability to perform the action on a subject :param user: A user object :param action: an action string, typically 'read', 'edit', 'manage'. Use bouncer.constants for readability :param subject: the resource in question. Either a Class or an instance of a class. Pass the class if you want to know if the user has general access to perform the action on that type of object. Or pass a specific object, if you want to know if the user has the ability to that specific instance :returns: Boolean """
    ability = Ability(user, get_authorization_method())
    return ability.can(action, subject)
<SYSTEM_TASK:> Similar to ``can`` but will raise a AccessDenied Exception if does not have access <END_TASK> <USER_TASK:> Description: def ensure(user, action, subject): """ Similar to ``can`` but will raise a AccessDenied Exception if does not have access"""
    ability = Ability(user, get_authorization_method())
    if ability.cannot(action, subject):
        raise AccessDenied()
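Hypothetical usage following the bouncer conventions implied by Ability and get_authorization_method (the rule definitions below are illustrative, not taken from the source):

# @authorization_method
# def authorize(user, they):
#     if user.is_admin:
#         they.can('manage', 'all')
#     else:
#         they.can('read', Article)
#
# can(current_user, 'read', some_article)     # -> True or False
# ensure(current_user, 'edit', some_article)  # raises AccessDenied when not permitted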
<SYSTEM_TASK:> Download the annotation crop, with optional image modifications. <END_TASK> <USER_TASK:> Description: def dump(self, dest_pattern="{id}.jpg", override=True, mask=False, alpha=False, bits=8, zoom=None, max_size=None, increase_area=None, contrast=None, gamma=None, colormap=None, inverse=None): """ Download the annotation crop, with optional image modifications. Parameters ---------- dest_pattern : str, optional Destination path for the downloaded image. "{X}" patterns are replaced by the value of X attribute if it exists. override : bool, optional True if a file with same name can be overrided by the new file. mask : bool, optional True if a binary mask based on given annotations must be returned, False otherwise. alpha : bool, optional True if image background (outside annotations) must be transparent, False otherwise. zoom : int, optional Optional image zoom number bits : int (8,16,32) or str ("max"), optional Bit depth (bit per channel) of returned image. "max" returns the original image bit depth max_size : int, tuple, optional Maximum size (width or height) of returned image. None to get original size. increase_area : float, optional Increase the crop size. For example, an annotation whose bounding box size is (w,h) will have a crop dimension of (w*increase_area, h*increase_area). contrast : float, optional Optional contrast applied on returned image. gamma : float, optional Optional gamma applied on returned image. colormap : int, optional Cytomine identifier of a colormap to apply on returned image. inverse : bool, optional True to inverse color mapping, False otherwise. Returns ------- downloaded : bool True if everything happens correctly, False otherwise. As a side effect, object attribute "filename" is filled with downloaded file path. """
    if self.id is None:
        raise ValueError("Cannot dump an annotation with no ID.")

    pattern = re.compile("{(.*?)}")
    dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), dest_pattern)

    destination = os.path.dirname(dest_pattern)
    filename, extension = os.path.splitext(os.path.basename(dest_pattern))
    extension = extension[1:]

    if extension not in ("jpg", "png", "tif", "tiff"):
        extension = "jpg"

    if not os.path.exists(destination):
        os.makedirs(destination)

    parameters = {
        "zoom": zoom,
        "maxSize": max_size,
        "increaseArea": increase_area,
        "contrast": contrast,
        "gamma": gamma,
        "colormap": colormap,
        "inverse": inverse,
        "bits": bits
    }

    if mask and alpha:
        image = "alphamask"
        if extension == "jpg":
            extension = "png"
    elif mask:
        image = "mask"
    else:
        image = "crop"

    file_path = os.path.join(destination, "{}.{}".format(filename, extension))

    url = self.cropURL.replace("crop.jpg", "{}.{}".format(image, extension))
    result = Cytomine.get_instance().download_file(url, file_path, override, parameters)
    if result:
        self.filename = file_path
    return result
<SYSTEM_TASK:> Set the sequence for subsequent requests. Mostly used in client scripts to avoid passing the sequence and organism on every function call. <END_TASK> <USER_TASK:> Description: def cli(ctx, organism, sequence): """Set the sequence for subsequent requests. Mostly used in client scripts to avoid passing the sequence and organism on every function call. Output: None """
return ctx.gi.annotations.set_sequence(organism, sequence)
<SYSTEM_TASK:> The path attribute returns a stringified, concise representation of <END_TASK> <USER_TASK:> Description: def path(self): """The path attribute returns a stringified, concise representation of the MultiFieldSelector. It can be reversed by the ``from_path`` constructor. """
    if len(self.heads) == 1:
        # dict views are not indexable in Python 3; take the single item explicitly
        head, tail = next(iter(self.heads.items()))
        return _fmt_mfs_path(head, tail)
    else:
        return "(" + "|".join(
            _fmt_mfs_path(k, v) for (k, v) in self.heads.items()
        ) + ")"
<SYSTEM_TASK:> Creates a copy of the passed object which only contains the parts <END_TASK> <USER_TASK:> Description: def get(self, obj): """Creates a copy of the passed object which only contains the parts which are pointed to by one of the FieldSelectors that were used to construct the MultiFieldSelector. Can be used to produce 'filtered' versions of objects. """
    # .items() instead of the Python 2-only .iteritems() throughout
    ctor = type(obj)
    if isinstance(obj, (list, ListCollection)):
        if self.has_string:
            raise TypeError(
                "MultiFieldSelector has string in list collection context"
            )
        if self.has_none:
            tail = self.heads[None]
            vals = list(self._get(x, tail) for x in obj)
        else:
            vals = list(
                self._get(obj[head], tail) for head, tail in
                self.heads.items()
            )
        if isinstance(obj, ListCollection):
            return ctor(values=vals)
        else:
            return vals
    elif isinstance(obj, (dict, DictCollection)):
        if self.has_none:
            tail = self.heads[None]
            return ctor(
                (k, self._get(v, tail)) for k, v in obj.items()
            )
        else:
            return ctor(
                (head, self._get(obj[head], tail)) for head, tail in
                self.heads.items() if head in obj
            )
    else:
        if self.has_int or (self.has_none and self.heads[None] is not all):
            raise TypeError(
                "MultiFieldSelector has %s in %s context" % (
                    "int" if self.has_int else "none", ctor.__name__
                )
            )
        if self.has_none:
            return self._get(obj, all)
        else:
            kwargs = dict()
            for head, tail in self.heads.items():
                val = getattr(obj, head, None)
                if val is not None:
                    kwargs[head] = self._get(val, tail)
            return ctor(**kwargs)
<SYSTEM_TASK:> Deletes all of the fields at the specified locations. <END_TASK> <USER_TASK:> Description: def delete(self, obj, force=False): """Deletes all of the fields at the specified locations. args: ``obj=``\ *OBJECT* the object to remove the fields from ``force=``\ *BOOL* if True, missing attributes do not raise errors. Otherwise, the first failure raises an exception without making any changes to ``obj``. """
    # TODO: this could be a whole lot more efficient!
    if not force:
        for fs in self:
            try:
                fs.get(obj)
            except FieldSelectorException:
                raise

    for fs in self:
        try:
            fs.delete(obj)
        except FieldSelectorException:
            pass
<SYSTEM_TASK:> Finds and closes all processes of `socat`. <END_TASK> <USER_TASK:> Description: def reset_socat(use_sudo=False): """ Finds and closes all processes of `socat`. :param use_sudo: Use `sudo` command. As Docker-Fabric does not run `socat` with `sudo`, this is by default set to ``False``. Setting it to ``True`` could unintentionally remove instances from other users. :type use_sudo: bool """
    output = stdout_result('ps -o pid -C socat', quiet=True)
    pids = output.split('\n')[1:]
    puts("Removing process(es) with id(s) {0}.".format(', '.join(pids)))
    which = sudo if use_sudo else run
    which('kill {0}'.format(' '.join(pids)), quiet=True)
<SYSTEM_TASK:> Shows version information of the remote Docker service, similar to ``docker version``. <END_TASK> <USER_TASK:> Description: def version(): """ Shows version information of the remote Docker service, similar to ``docker version``. """
    output = docker_fabric().version()
    col_len = max(map(len, output.keys())) + 1
    puts('')
    for k, v in six.iteritems(output):
        fastprint('{0:{1}} {2}'.format(''.join((k, ':')), col_len, v), end='\n', flush=False)
    fastprint('', flush=True)
<SYSTEM_TASK:> Lists images on the Docker remote host, similar to ``docker images``. <END_TASK> <USER_TASK:> Description: def list_images(list_all=False, full_ids=False): """ Lists images on the Docker remote host, similar to ``docker images``. :param list_all: Lists all images (e.g. dependencies). Default is ``False``, only shows named images. :type list_all: bool :param full_ids: Shows the full ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool """
    images = docker_fabric().images(all=list_all)
    _format_output_table(images, IMAGE_COLUMNS, full_ids)
<SYSTEM_TASK:> Lists containers on the Docker remote host, similar to ``docker ps``. <END_TASK> <USER_TASK:> Description: def list_containers(list_all=True, short_image=True, full_ids=False, full_cmd=False): """ Lists containers on the Docker remote host, similar to ``docker ps``. :param list_all: Shows all containers, including exited ones. Default is ``True``; set to ``False`` to omit exited containers. :type list_all: bool :param short_image: Hides the repository prefix for preserving space. Default is ``True``. :type short_image: bool :param full_ids: Shows the full image ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool :param full_cmd: Shows the full container command. When ``False`` (default) only shows the first 25 characters. :type full_cmd: bool """
    containers = docker_fabric().containers(all=list_all)
    _format_output_table(containers, CONTAINER_COLUMNS, full_ids, full_cmd, short_image)
<SYSTEM_TASK:> Lists networks on the Docker remote host, similar to ``docker network ls``. <END_TASK> <USER_TASK:> Description: def list_networks(full_ids=False): """ Lists networks on the Docker remote host, similar to ``docker network ls``. :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool """
    networks = docker_fabric().networks()
    _format_output_table(networks, NETWORK_COLUMNS, full_ids)
<SYSTEM_TASK:> Removes all containers that have finished running. Similar to the ``prune`` functionality in newer Docker versions. <END_TASK> <USER_TASK:> Description: def cleanup_containers(**kwargs): """ Removes all containers that have finished running. Similar to the ``prune`` functionality in newer Docker versions. """
    containers = docker_fabric().cleanup_containers(**kwargs)
    if kwargs.get('list_only'):
        puts('Existing containers:')
        for c_id, c_name in containers:
            fastprint('{0}  {1}'.format(c_id, c_name), end='\n')
<SYSTEM_TASK:> Removes all images that have no name, and that are not referenced as a dependency by any other named image. Similar <END_TASK> <USER_TASK:> Description: def cleanup_images(remove_old=False, **kwargs): """ Removes all images that have no name, and that are not referenced as a dependency by any other named image. Similar to the ``prune`` functionality in newer Docker versions, but supports more filters. :param remove_old: Also remove images that do have a name, but no `latest` tag. :type remove_old: bool """
keep_tags = env.get('docker_keep_tags')
if keep_tags is not None:
    kwargs.setdefault('keep_tags', keep_tags)
removed_images = docker_fabric().cleanup_images(remove_old=remove_old, **kwargs)
if kwargs.get('list_only'):
    puts('Unused images:')
    for image_name in removed_images:
        fastprint(image_name, end='\n')
<SYSTEM_TASK:> Saves a Docker image from the remote to a local file. For performance reasons, uses the Docker command line client <END_TASK> <USER_TASK:> Description: def save_image(image, filename=None): """ Saves a Docker image from the remote to a local file. For performance reasons, uses the Docker command line client on the host, generates a gzip-tarball and downloads that. :param image: Image name or id. :type image: unicode :param filename: File name under which to store the image locally. If not provided, will use ``<image>.tar.gz`` in the current working directory. :type filename: unicode """
local_name = filename or '{0}.tar.gz'.format(image)
cli.save_image(image, local_name)
<SYSTEM_TASK:> Uploads an image from a local file to a Docker remote. Note that this temporarily has to extend the service timeout <END_TASK> <USER_TASK:> Description: def load_image(filename, timeout=120): """ Uploads an image from a local file to a Docker remote. Note that this temporarily has to extend the service timeout period. :param filename: Local file name. :type filename: unicode :param timeout: Timeout in seconds to set temporarily for the upload. :type timeout: int """
c = docker_fabric()
with open(expand_path(filename), 'r') as f:
    _timeout = c._timeout
    c._timeout = timeout
    try:
        c.load_image(f)
    finally:
        c._timeout = _timeout
<SYSTEM_TASK:> Generator method which returns the differences from the invocant to <END_TASK> <USER_TASK:> Description: def diff_iter(self, other, **kwargs): """Generator method which returns the differences from the invocant to the argument. args: ``other=``\ *Record*\ \|\ *Anything* The thing to compare against; the types must match, unless ``duck_type=True`` is passed. *diff_option*\ =\ *value* Unknown keyword arguments are eventually passed to a :ref:`DiffOptions` constructor. """
from normalize.diff import diff_iter
return diff_iter(self, other, **kwargs)
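A minimal usage sketch; the record instances are illustrative, and ``duck_type`` is one of the keyword options forwarded to ``DiffOptions`` as described above:

# record_a and record_b are assumed to be instances of normalize Record types.
for difference in record_a.diff_iter(record_b, duck_type=True):
    print(difference)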
<SYSTEM_TASK:> Parse list of weight assignments. <END_TASK> <USER_TASK:> Description: def _parse_weights(weight_args, default_weight=0.6): """Parse list of weight assignments."""
weights_dict = {}
r_group_weight = default_weight
for weight_arg in weight_args:
    for weight_assignment in weight_arg.split(','):
        if '=' not in weight_assignment:
            raise ValueError(
                'Invalid weight assignment: {}'.format(weight_assignment))
        key, value = weight_assignment.split('=', 1)
        value = float(value)
        if key == 'R':
            r_group_weight = value
        elif key == '*':
            default_weight = value
        elif hasattr(Atom, key):
            weights_dict[Atom(key)] = value
        else:
            raise ValueError('Invalid element: {}'.format(key))
return weights_dict, r_group_weight, default_weight
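For illustration, a hedged sketch of the accepted assignment syntax; it assumes ``Atom`` is psamm's element class imported in the surrounding module, since the parser validates element symbols against it:

# 'R' sets the R-group weight and '*' overrides the default weight;
# any other key must be a valid element symbol.
weights, r_weight, default = _parse_weights(['C=1,H=0', 'R=40,*=0.6'])
# weights == {Atom('C'): 1.0, Atom('H'): 0.0}; r_weight == 40.0; default == 0.6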
<SYSTEM_TASK:> Combine multiple pair transfers into one. <END_TASK> <USER_TASK:> Description: def _combine_transfers(self, result): """Combine multiple pair transfers into one."""
transfers = {}
for reaction_id, c1, c2, form in result:
    key = reaction_id, c1, c2
    combined_form = transfers.setdefault(key, Formula())
    transfers[key] = combined_form | form
for (reaction_id, c1, c2), form in iteritems(transfers):
    yield reaction_id, c1, c2, form
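A sketch of the input shape this expects, assuming psamm's ``Formula.parse`` is available; rows sharing the key ``(reaction_id, c1, c2)`` are merged with the union operator:

from psamm.formula import Formula

result = [
    ('rxn_1', 'cpd_a', 'cpd_b', Formula.parse('C6H12O6')),
    ('rxn_1', 'cpd_a', 'cpd_b', Formula.parse('PO3')),
]
# `self` is assumed to be the enclosing command instance.
for reaction_id, c1, c2, form in self._combine_transfers(result):
    print(reaction_id, c1, c2, form)  # one combined transfer per key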
<SYSTEM_TASK:> Copies a resource from a container to a compressed tarball and downloads it. <END_TASK> <USER_TASK:> Description: def copy_resource(container, resource, local_filename, contents_only=True): """ Copies a resource from a container to a compressed tarball and downloads it. :param container: Container name or id. :type container: unicode :param resource: Name of resource to copy. :type resource: unicode :param local_filename: Path to store the tarball locally. :type local_filename: unicode :param contents_only: In case ``resource`` is a directory, put all contents at the root of the tar file. If this is set to ``False``, the directory itself will be at the root instead. :type contents_only: bool """
with temp_dir() as remote_tmp:
    base_name = os.path.basename(resource)
    copy_path = posixpath.join(remote_tmp, 'copy_tmp')
    run(mkdir(copy_path, check_if_exists=True))
    remote_name = posixpath.join(copy_path, base_name)
    archive_name = 'container_{0}.tar.gz'.format(container)
    archive_path = posixpath.join(remote_tmp, archive_name)
    run('docker cp {0}:{1} {2}'.format(container, resource, copy_path), shell=False)
    if contents_only and is_directory(remote_name):
        src_dir = remote_name
        src_files = '*'
    else:
        src_dir = copy_path
        src_files = base_name
    with cd(src_dir):
        run(targz(archive_path, src_files))
    get(archive_path, local_filename)
<SYSTEM_TASK:> Saves a Docker image as a compressed tarball. This command line client method is a suitable alternative if the <END_TASK> <USER_TASK:> Description: def save_image(image, local_filename): """ Saves a Docker image as a compressed tarball. This command line client method is a suitable alternative if the Remote API method is too slow. :param image: Image id or tag. :type image: unicode :param local_filename: Local file name to store the image into. If this is a directory, the image will be stored there as a file named ``image_<Image name>.tar.gz``. """
r_name, __, i_name = image.rpartition('/')
i_name, __, __ = i_name.partition(':')
with temp_dir() as remote_tmp:
    archive = posixpath.join(remote_tmp, 'image_{0}.tar.gz'.format(i_name))
    run('docker save {0} | gzip --stdout > {1}'.format(image, archive), shell=False)
    get(archive, local_filename)
<SYSTEM_TASK:> Iterate over the compound entries in the given file <END_TASK> <USER_TASK:> Description: def parse_compound_file(f, context=None): """Iterate over the compound entries in the given file"""
f.readline()  # Skip header
for lineno, row in enumerate(csv.reader(f, delimiter='\t')):
    compound_id, names, formula = row[:3]
    names = (decode_name(name) for name in names.split(',<br>'))

    # ModelSEED sometimes uses an asterisk and number at
    # the end of formulas. This seems to have a similar
    # meaning as '(...)n'.
    m = re.match(r'^(.*)\*(\d*)$', formula)
    if m is not None:
        if m.group(2) != '':
            formula = '({}){}'.format(m.group(1), m.group(2))
        else:
            formula = '({})n'.format(m.group(1))

    formula = formula.strip()
    if formula == '' or formula == 'noformula':
        formula = None

    mark = FileMark(context, lineno, 0)
    yield CompoundEntry(compound_id, names, formula, filemark=mark)
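A short usage sketch; the file name is illustrative, and the attribute names on the yielded objects are assumptions based on the ``CompoundEntry`` constructor call above:

with open('modelseed_compounds.tsv') as f:
    for entry in parse_compound_file(f, context='modelseed_compounds.tsv'):
        # A source formula such as 'C6H12O6*2' arrives here as '(C6H12O6)2'.
        print(entry.id, entry.formula)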
<SYSTEM_TASK:> This function calls the ``json_out`` function, if it was specified, <END_TASK> <USER_TASK:> Description: def to_json(self, propval, extraneous=False, to_json_func=None): """This function calls the ``json_out`` function, if it was specified, otherwise continues with JSON conversion of the value in the slot by calling ``to_json_func`` on it. """
if self.json_out:
    return self.json_out(propval)
else:
    if not to_json_func:
        from normalize.record.json import to_json
        to_json_func = to_json
    return to_json_func(propval, extraneous)
<SYSTEM_TASK:> Parse a compound specification. <END_TASK> <USER_TASK:> Description: def parse_compound(s, global_compartment=None): """Parse a compound specification. If no compartment is specified in the string, the global compartment will be used. """
m = re.match(r'^\|(.*)\|$', s)
if m:
    s = m.group(1)

m = re.match(r'^(.+)\[(\S+)\]$', s)
if m:
    compound_id = m.group(1)
    compartment = m.group(2)
else:
    compound_id = s
    compartment = global_compartment

return Compound(compound_id, compartment=compartment)
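The two regular expressions above translate to the following behavior (a sketch; ``Compound`` is the class constructed in the last line):

parse_compound('atp[c]', 'e')     # Compound('atp', compartment='c')
parse_compound('h2o', 'e')        # Compound('h2o', compartment='e')
parse_compound('|pyr[m]|', None)  # surrounding pipes are stripped first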
<SYSTEM_TASK:> Find a random minimal network of model reactions. <END_TASK> <USER_TASK:> Description: def random_sparse(strategy, prob, obj_reaction, flux_threshold): """Find a random minimal network of model reactions. Given a reaction to optimize and a threshold, delete entities randomly until the flux of the reaction to optimize falls under the threshold. Keep deleting until no more entities can be deleted. It works with two strategies: deleting reactions or deleting genes (reactions related to certain genes). Args: strategy: :class:`.ReactionDeletionStrategy` or :class:`.GeneDeletionStrategy`. prob: :class:`psamm.fluxanalysis.FluxBalanceProblem`. obj_reaction: objective reaction to optimize. flux_threshold: threshold of max reaction flux. """
essential = set()
deleted = set()

for entity, deleted_reactions in strategy.iter_tests():
    if obj_reaction in deleted_reactions:
        logger.info(
            'Marking entity {} as essential because the objective'
            ' reaction depends on this entity...'.format(entity))
        essential.add(entity)
        continue

    if len(deleted_reactions) == 0:
        logger.info(
            'No reactions were removed when entity {}'
            ' was deleted'.format(entity))
        deleted.add(entity)
        strategy.delete(entity, deleted_reactions)
        continue

    logger.info('Deleted reactions: {}'.format(
        ', '.join(deleted_reactions)))

    constr = []
    for r in deleted_reactions:
        flux_var = prob.get_flux_var(r)
        c, = prob.prob.add_linear_constraints(flux_var == 0)
        constr.append(c)

    logger.info('Trying FBA without reactions {}...'.format(
        ', '.join(deleted_reactions)))
    try:
        prob.maximize(obj_reaction)
    except fluxanalysis.FluxBalanceError:
        logger.info(
            'FBA is infeasible, marking {} as essential'.format(
                entity))
        for c in constr:
            c.delete()
        essential.add(entity)
        continue

    logger.debug('Reaction {} has flux {}'.format(
        obj_reaction, prob.get_flux(obj_reaction)))

    if prob.get_flux(obj_reaction) < flux_threshold:
        for c in constr:
            c.delete()
        essential.add(entity)
        logger.info('Entity {} was essential'.format(entity))
    else:
        deleted.add(entity)
        strategy.delete(entity, deleted_reactions)
        logger.info('Entity {} was deleted'.format(entity))

return essential, deleted
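A hedged sketch of how this might be driven with the reaction-deletion strategy named in the docstring; the strategy constructor signature and the objective id are assumptions, not taken from the source:

# `mm` is assumed to be a metabolic model and `solver` an LP solver instance.
prob = fluxanalysis.FluxBalanceProblem(mm, solver)
prob.maximize('Biomass')  # 'Biomass' is an illustrative objective reaction id
threshold = 0.9 * prob.get_flux('Biomass')
strategy = ReactionDeletionStrategy(mm)  # assumed constructor signature
essential, deleted = random_sparse(strategy, prob, 'Biomass', threshold)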
<SYSTEM_TASK:> Run reaction production check method. <END_TASK> <USER_TASK:> Description: def run_reaction_production_check(self, model, solver, threshold, implicit_sinks=True): """Run reaction production check method."""
prob = solver.create_problem()

# Create flux variables
v = prob.namespace()
for reaction_id in model.reactions:
    lower, upper = model.limits[reaction_id]
    v.define([reaction_id], lower=lower, upper=upper)

# Build mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
    compound, reaction_id = spec
    massbalance_lhs[compound] += v(reaction_id) * value

# Create production variables and apply constraints
for compound, lhs in iteritems(massbalance_lhs):
    if implicit_sinks:
        # The constraint is merely >0 meaning that we have implicit
        # sinks for all compounds.
        prob.add_linear_constraints(lhs >= 0)
    else:
        prob.add_linear_constraints(lhs == 0)

confirmed_production = set()
for reaction in model.reactions:
    if all(c in confirmed_production
           for c, _ in model.get_reaction_values(reaction)):
        continue

    prob.set_objective(v(reaction))
    for sense in (lp.ObjectiveSense.Maximize, lp.ObjectiveSense.Minimize):
        try:
            result = prob.solve(sense)
        except lp.SolverError as e:
            self.fail(
                'Failed to solve for compound, reaction: {}, {}:'
                ' {}'.format(compound, reaction, e))
        flux = result.get_value(v(reaction))

        for compound, value in model.get_reaction_values(reaction):
            if compound in confirmed_production:
                continue

            production = 0
            if sense == lp.ObjectiveSense.Maximize and flux > 0:
                production = float(value) * flux
            elif sense == lp.ObjectiveSense.Minimize and flux < 0:
                production = float(value) * flux

            if production >= threshold:
                confirmed_production.add(compound)

for compound in sorted(model.compounds):
    if compound not in confirmed_production:
        yield compound
<SYSTEM_TASK:> Add constraints to the problem <END_TASK> <USER_TASK:> Description: def add_linear_constraints(self, *relations): """Add constraints to the problem Each constraint is represented by a Relation, and the expression in that relation can be a set expression. """
constraints = []
for relation in relations:
    if self._check_relation(relation):
        constraints.append(Constraint(self, None))
    else:
        for name in self._add_constraints(relation):
            constraints.append(Constraint(self, name))
return constraints
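A brief sketch of a call site; ``prob.var`` and the prior variable definitions are assumptions about the surrounding solver interface:

# x and y are assumed to have been defined on the same problem earlier.
x, y = prob.var('x'), prob.var('y')
c1, c2 = prob.add_linear_constraints(x + y <= 10, x - y >= 0)
c1.delete()  # returned Constraint handles can be removed individually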
<SYSTEM_TASK:> Set objective expression of the problem. <END_TASK> <USER_TASK:> Description: def set_objective(self, expression): """Set objective expression of the problem."""
if isinstance(expression, numbers.Number):
    # Allow expressions with no variables as objective,
    # represented as a number
    expression = Expression(offset=expression)

linear = []
quad = []

# Reset previous objective.
for var in self._non_zero_objective:
    if var not in expression:
        if not isinstance(var, Product):
            linear.append((self._variables[var], 0))
        else:
            t = self._variables[var[0]], self._variables[var[1]], 0
            quad.append(t)

self._non_zero_objective.clear()

# Set actual objective values
for var, value in expression.values():
    if not isinstance(var, Product):
        self._non_zero_objective.add(var)
        linear.append((self._variables[var], float(value)))
    else:
        if len(var) > 2:
            raise ValueError('Invalid objective: {}'.format(var))
        self._non_zero_objective.add(var)
        var1 = self._variables[var[0]]
        var2 = self._variables[var[1]]
        if var1 == var2:
            value *= 2
        quad.append((var1, var2, float(value)))

# We have to build the set of variables to update so that we can
# avoid calling set_linear if the set is empty. This is due to
# set_linear failing if the input is an empty iterable.
if len(linear) > 0:
    self._cp.objective.set_linear(linear)
if len(quad) > 0:
    self._cp.objective.set_quadratic_coefficients(quad)

if hasattr(self._cp.objective, 'set_offset'):
    self._cp.objective.set_offset(float(expression.offset))
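A sketch of the accepted forms, mirroring the branches above; ``prob.var`` is an assumed accessor for previously defined variables:

prob.set_objective(2 * prob.var('x') + 3 * prob.var('y'))  # linear terms
prob.set_objective(prob.var('x') * prob.var('y'))  # a quadratic Product term
prob.set_objective(42)  # a bare number becomes a constant-offset Expression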
<SYSTEM_TASK:> Reset problem type to whatever is appropriate. <END_TASK> <USER_TASK:> Description: def _reset_problem_type(self): """Reset problem type to whatever is appropriate."""
# Only need to reset the type after the first solve. This also works
# around a bug in Cplex where get_num_binary() in some rare cases
# causes a segfault.
if self._solve_count > 0:
    integer_count = 0
    for func in (self._cp.variables.get_num_binary,
                 self._cp.variables.get_num_integer,
                 self._cp.variables.get_num_semicontinuous,
                 self._cp.variables.get_num_semiinteger):
        integer_count += func()

    integer = integer_count > 0
    quad_constr = self._cp.quadratic_constraints.get_num() > 0
    quad_obj = self._cp.objective.get_num_quadratic_variables() > 0

    if not integer:
        if quad_constr:
            new_type = self._cp.problem_type.QCP
        elif quad_obj:
            new_type = self._cp.problem_type.QP
        else:
            new_type = self._cp.problem_type.LP
    else:
        if quad_constr:
            new_type = self._cp.problem_type.MIQCP
        elif quad_obj:
            new_type = self._cp.problem_type.MIQP
        else:
            new_type = self._cp.problem_type.MILP

    logger.debug('Setting problem type to {}...'.format(
        self._cp.problem_type[new_type]))
    self._cp.set_problem_type(new_type)
else:
    logger.debug('Problem type is {}'.format(
        self._cp.problem_type[self._cp.get_problem_type()]))

# Force QP/MIQP solver to look for global optimum. We set it here only
# for QP/MIQP problems to avoid the warnings generated for other
# problem types when this parameter is set.
quad_obj = self._cp.objective.get_num_quadratic_variables() > 0

if hasattr(self._cp.parameters, 'optimalitytarget'):
    target_param = self._cp.parameters.optimalitytarget
else:
    target_param = self._cp.parameters.solutiontarget

if quad_obj:
    target_param.set(target_param.values.optimal_global)
else:
    target_param.set(target_param.values.auto)
<SYSTEM_TASK:> Be sure to implement this method when sub-classing; otherwise you <END_TASK> <USER_TASK:> Description: def copy(self): """Be sure to implement this method when sub-classing; otherwise you will lose any specialization context."""
doppel = type(self)(
    self.unpack,
    self.apply,
    self.collect,
    self.reduce,
    apply_empty_slots=self.apply_empty_slots,
    extraneous=self.extraneous,
    ignore_empty_string=self.ignore_empty_string,
    ignore_none=self.ignore_none,
    visit_filter=self.visit_filter,
)
for x in self.cue:
    doppel.push(x)
doppel.seen = self.seen
return doppel
<SYSTEM_TASK:> Unpack a value during a 'visit' <END_TASK> <USER_TASK:> Description: def unpack(cls, value, value_type, visitor): """Unpack a value during a 'visit' args: ``value=``\ *object* The instance being visited ``value_type=``\ *RecordType* The expected type of the instance ``visitor=``\ *Visitor* The context/options returns a tuple with two items: ``get_prop=``\ *function* This function should take a :py:class:`normalize.property.Property` instance, and return the slot from the value, or raise ``AttributeError`` or ``KeyError`` if the slot is empty. Returning nothing means that the item has no properties to unpack; ie, it's an opaque type. ``get_item=``\ *generator* This generator should return the tuple protocol used by :py:class:`normalize.coll.Collection`: (K, V) where K can be an ascending integer (for sequences), V (for sets), or something hashable like a string (for dictionaries/maps) """
if issubclass(value_type, Collection):
    try:
        generator = value.itertuples()
    except AttributeError:
        if isinstance(value, value_type.colltype):
            generator = value_type.coll_to_tuples(value)
        else:
            raise exc.VisitorUnpackError(
                passed=value,
                colltype=value_type.colltype.__name__,
                context=visitor,
            )
else:
    generator = None

if issubclass(value_type, Record):
    def propget(prop):
        return prop.__get__(value)
else:
    propget = None

return propget, generator
<SYSTEM_TASK:> 'apply' is a general place to put a function which is called on <END_TASK> <USER_TASK:> Description: def apply(cls, value, prop, visitor): """'apply' is a general place to put a function which is called on every extant record slot. This is usually the most important function to implement when sub-classing. The default implementation passes through the slot value as-is, but expected exceptions are converted to ``None``. args: ``value=``\ *value*\ \|\ *AttributeError*\ \|\ *KeyError* This is the value currently in the slot, or the Record itself with the ``apply_records`` visitor option. *AttributeError* will only be received if you passed ``apply_empty_slots``, and *KeyError* will be passed if ``parent_obj`` is a ``dict`` (see :py:meth:`Visitor.map_prop` for details about when this might happen) ``prop=``\ *Property*\ \|\ ``None`` This is the :py:class:`normalize.Property` instance which represents the field being traversed. This can be ``None`` when being applied over Collection instances, where the type of the contents is not a Record. ``visitor=``\ *Visitor* This object can be used to inspect parameters of the current run, such as options which control which kinds of values are visited, which fields are being visited and where the function is in relation to the starting point. """
return (
    None if isinstance(value, (AttributeError, KeyError)) else value
)
<SYSTEM_TASK:> Hook called for each normalize.coll.Collection, after mapping over <END_TASK> <USER_TASK:> Description: def aggregate(self, mapped_coll_generator, coll_type, visitor): """Hook called for each normalize.coll.Collection, after mapping over each of the items in the collection. The default implementation calls :py:meth:`normalize.coll.Collection.tuples_to_coll` with ``coerce=False``, which just re-assembles the collection into a native python collection type of the same type as the input collection. args: ``mapped_coll_generator=`` *generator func* Generator which returns (key, value) pairs (like :py:meth:`normalize.coll.Collection.itertuples`) ``coll_type=``\ *CollectionType* This is the :py:class:`normalize.coll.Collection`-derived *class* which is currently being reduced. ``visitor=``\ *Visitor* Context/options object """
return coll_type.tuples_to_coll(mapped_coll_generator, coerce=False)
<SYSTEM_TASK:> This reduction is called to combine the mapped slot and collection <END_TASK> <USER_TASK:> Description: def reduce(self, mapped_props, aggregated, value_type, visitor): """This reduction is called to combine the mapped slot and collection item values into a single value for return. The default implementation tries to behave naturally; you'll almost always get a dict back when mapping over a record, and a list or some other collection when mapping over collections. If the collection has additional properties which are not ignored (eg, not extraneous, not filtered), then the result will be a dictionary with the results of mapping the properties, and a 'values' key will be added with the result of mapping the items in the collection. args: ``mapped_props=``\ *generator* Iterating over this generator will yield K, V pairs, where K is **the Property object** and V is the mapped value. ``aggregated=``\ *object* This contains whatever ``aggregate`` returned, normally a list. ``value_type=``\ *RecordType* This is the type which is currently being reduced. A :py:class:`normalize.record.Record` subclass ``visitor=``\ *Visitor* Context/options object. """
reduced = None
if mapped_props:
    reduced = dict((k.name, v) for k, v in mapped_props)

if issubclass(value_type, Collection) and aggregated is not None:
    if all(visitor.is_filtered(prop) for prop in
           value_type.properties.values()):
        reduced = aggregated
    else:
        if reduced.get("values", False):
            raise exc.VisitorTooSimple(
                fs=visitor.field_selector,
                value_type_name=value_type.__name__,
                visitor=type(self).__name__,
            )
        else:
            reduced['values'] = aggregated

return reduced
<SYSTEM_TASK:> Reflect is for visitors where you are exposing some information <END_TASK> <USER_TASK:> Description: def reflect(cls, X, **kwargs): """Reflect is for visitors where you are exposing some information about the types reachable from a starting type to an external system. For example, a front-end, a REST URL router and documentation framework, an avro schema definition, etc. X can be a type or an instance. This API should be considered **experimental** """
if isinstance(X, type):
    value = None
    value_type = X
else:
    value = X
    value_type = type(X)

if not issubclass(value_type, Record):
    raise TypeError("Cannot reflect on %s" % value_type.__name__)

visitor = cls.Visitor(
    cls.scantypes, cls.propinfo, cls.itemtypes, cls.typeinfo, **kwargs)

return cls.map(visitor, value, value_type)
<SYSTEM_TASK:> The common visitor API used by all three visitor implementations. <END_TASK> <USER_TASK:> Description: def map(cls, visitor, value, value_type): """The common visitor API used by all three visitor implementations. args: ``visitor=``\ *Visitor* Visitor options instance: contains the callbacks to use to implement the visiting, as well as traversal & filtering options. ``value=``\ *Object* Object being visited ``value_type=``\ *RecordType* The type object controlling the visiting. """
unpacked = visitor.unpack(value, value_type, visitor)

if unpacked == cls.StopVisiting or isinstance(
    unpacked, cls.StopVisiting
):
    return unpacked.return_value

if isinstance(unpacked, tuple):
    props, coll = unpacked
else:
    props, coll = unpacked, None

# recurse into values for collections
if coll:
    coll_map_generator = cls.map_collection(
        visitor, coll, value_type,
    )
    mapped_coll = visitor.collect(
        coll_map_generator, value_type, visitor,
    )
else:
    mapped_coll = None

# recurse into regular properties
mapped_props = None
if props:
    mapped_props = cls.map_record(visitor, props, value_type)
elif mapped_coll is None:
    return visitor.apply(value, None, visitor)

return visitor.reduce(
    mapped_props, mapped_coll, value_type, visitor,
)
<SYSTEM_TASK:> Convenience function for returning the right FBA problem instance <END_TASK> <USER_TASK:> Description: def _get_fba_problem(model, tfba, solver): """Convenience function for returning the right FBA problem instance"""
p = FluxBalanceProblem(model, solver)
if tfba:
    p.add_thermodynamic()
return p
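Call sites then reduce to a few lines (a sketch; the objective reaction id is illustrative):

p = _get_fba_problem(model, tfba=True, solver=solver)
p.maximize('Biomass')  # thermodynamic constraints are already in place
flux = p.get_flux('Biomass')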