Dataset columns: text_prompt (string, lengths 100 to 17.7k), code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Checks if all incoming parameters meet the expected values. <END_TASK> <USER_TASK:> Description: def read_validate_params(self, request): """ Checks if all incoming parameters meet the expected values. """
    self.client = self.client_authenticator.by_identifier_secret(request)

    self.password = request.post_param("password")
    self.username = request.post_param("username")

    self.scope_handler.parse(request=request, source="body")

    return True
<SYSTEM_TASK:> Validate the incoming request. <END_TASK> <USER_TASK:> Description: def read_validate_params(self, request): """ Validate the incoming request. :param request: The incoming :class:`oauth2.web.Request`. :return: Returns ``True`` if data is valid. :raises: :class:`oauth2.error.OAuthInvalidError` """
    self.refresh_token = request.post_param("refresh_token")

    if self.refresh_token is None:
        raise OAuthInvalidError(
            error="invalid_request",
            explanation="Missing refresh_token in request body")

    self.client = self.client_authenticator.by_identifier_secret(request)

    try:
        access_token = self.access_token_store.fetch_by_refresh_token(
            self.refresh_token)
    except AccessTokenNotFound:
        raise OAuthInvalidError(error="invalid_request",
                                explanation="Invalid refresh token")

    refresh_token_expires_at = access_token.refresh_expires_at
    self.refresh_grant_type = access_token.grant_type

    if refresh_token_expires_at != 0 and \
            refresh_token_expires_at < int(time.time()):
        raise OAuthInvalidError(error="invalid_request",
                                explanation="Invalid refresh token")

    self.data = access_token.data
    self.user_id = access_token.user_id

    self.scope_handler.parse(request, "body")
    self.scope_handler.compare(access_token.scopes)

    return True
<SYSTEM_TASK:> Get value from walking key path with start object obj. <END_TASK> <USER_TASK:> Description: def value_for_keypath(obj, path): """Get value from walking key path with start object obj. """
    val = obj

    for part in path.split('.'):
        match = re.match(list_index_re, part)

        if match is not None:
            val = _extract(val, match.group(1))

            if not isinstance(val, list) and not isinstance(val, tuple):
                raise TypeError('expected list/tuple')

            index = int(match.group(2))
            val = val[index]
        else:
            val = _extract(val, part)

        if val is None:
            return None

    return val
<SYSTEM_TASK:> Set attribute value new_value at key path of start object obj. <END_TASK> <USER_TASK:> Description: def set_value_for_keypath(obj, path, new_value, preserve_child = False): """Set attribute value new_value at key path of start object obj. """
    parts = path.split('.')
    last_part = len(parts) - 1
    dst = obj

    for i, part in enumerate(parts):
        match = re.match(list_index_re, part)

        if match is not None:
            dst = _extract(dst, match.group(1))

            if not isinstance(dst, list) and not isinstance(dst, tuple):
                raise TypeError('expected list/tuple')

            index = int(match.group(2))

            if i == last_part:
                dst[index] = new_value
            else:
                dst = dst[index]
        else:
            if i != last_part:
                dst = _extract(dst, part)
            else:
                if isinstance(dst, dict):
                    dst[part] = new_value
                else:
                    if not preserve_child:
                        setattr(dst, part, new_value)
                    else:
                        try:
                            v = getattr(dst, part)
                        except AttributeError:
                            setattr(dst, part, new_value)
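A brief usage sketch of these two keypath helpers, assuming they are importable from a kvc module, that _extract resolves plain attributes, and that list_index_re matches segments such as 'children[1]' (all three are assumptions, not confirmed by the source):

# Hypothetical usage of the keypath helpers above.
from kvc import value_for_keypath, set_value_for_keypath

class Node(object):
    def __init__(self, title):
        self.title = title

class Window(object):
    def __init__(self):
        self.children = [Node('OK'), Node('Cancel')]

win = Window()
print(value_for_keypath(win, 'children[1].title'))        # expected: 'Cancel'
set_value_for_keypath(win, 'children[1].title', 'Close')
print(win.children[1].title)                              # expected: 'Close'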
<SYSTEM_TASK:> Fill a surface with a linear gradient pattern. <END_TASK> <USER_TASK:> Description: def fill_gradient(surface, color, gradient, rect=None, vertical=True, forward=True): """Fill a surface with a linear gradient pattern. color starting color gradient final color rect area to fill; default is surface's rect vertical True=vertical; False=horizontal forward True=forward; False=reverse See http://www.pygame.org/wiki/GradientCode """
    if rect is None:
        rect = surface.get_rect()

    x1, x2 = rect.left, rect.right
    y1, y2 = rect.top, rect.bottom

    if vertical:
        h = y2 - y1
    else:
        h = x2 - x1

    assert h > 0

    if forward:
        a, b = color, gradient
    else:
        b, a = color, gradient

    rate = (float(b[0] - a[0]) / h,
            float(b[1] - a[1]) / h,
            float(b[2] - a[2]) / h)

    fn_line = pygame.draw.line

    if vertical:
        for line in range(y1, y2):
            color = (min(max(a[0] + (rate[0] * (line - y1)), 0), 255),
                     min(max(a[1] + (rate[1] * (line - y1)), 0), 255),
                     min(max(a[2] + (rate[2] * (line - y1)), 0), 255))
            fn_line(surface, color, (x1, line), (x2, line))
    else:
        for col in range(x1, x2):
            color = (min(max(a[0] + (rate[0] * (col - x1)), 0), 255),
                     min(max(a[1] + (rate[1] * (col - x1)), 0), 255),
                     min(max(a[2] + (rate[2] * (col - x1)), 0), 255))
            fn_line(surface, color, (col, y1), (col, y2))
<SYSTEM_TASK:> Tightly bound the current text respecting current padding. <END_TASK> <USER_TASK:> Description: def shrink_wrap(self): """Tightly bound the current text respecting current padding."""
self.frame.size = (self.text_size[0] + self.padding[0] * 2, self.text_size[1] + self.padding[1] * 2)
<SYSTEM_TASK:> Call to have the view layout itself. <END_TASK> <USER_TASK:> Description: def layout(self): """Call to have the view layout itself. Subclasses should invoke this after laying out child views and/or updating its own frame. """
    if self.shadowed:
        shadow_size = theme.current.shadow_size
        shadowed_frame_size = (self.frame.w + shadow_size,
                               self.frame.h + shadow_size)
        self.surface = pygame.Surface(
            shadowed_frame_size, pygame.SRCALPHA, 32)
        shadow_image = resource.get_image('shadow')
        self.shadow_image = resource.scale_image(shadow_image,
                                                 shadowed_frame_size)
    else:
        self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
        self.shadow_image = None
<SYSTEM_TASK:> Apply theme style attributes to this instance and its children. <END_TASK> <USER_TASK:> Description: def stylize(self): """Apply theme style attributes to this instance and its children. This also causes a relayout to occur so that any changes in padding or other stylistic attributes may be handled. """
    # do children first in case parent needs to override their style
    for child in self.children:
        child.stylize()

    style = theme.current.get_dict(self)
    preserve_child = False

    try:
        preserve_child = getattr(theme.current, 'preserve_child')
    except AttributeError:
        preserve_child = False

    for key, val in style.iteritems():
        kvc.set_value_for_keypath(self, key, val, preserve_child)

    self.layout()
<SYSTEM_TASK:> Do not call directly. <END_TASK> <USER_TASK:> Description: def draw(self): """Do not call directly."""
    if self.hidden:
        return False

    if self.background_color is not None:
        render.fillrect(self.surface, self.background_color,
                        rect=pygame.Rect((0, 0), self.frame.size))

    for child in self.children:
        if not child.hidden:
            child.draw()

            topleft = child.frame.topleft

            if child.shadowed:
                shadow_size = theme.current.shadow_size
                shadow_topleft = (topleft[0] - shadow_size // 2,
                                  topleft[1] - shadow_size // 2)
                self.surface.blit(child.shadow_image, shadow_topleft)

            self.surface.blit(child.surface, topleft)

            if child.border_color and child.border_widths is not None:
                if (type(child.border_widths) is int and
                        child.border_widths > 0):
                    pygame.draw.rect(self.surface, child.border_color,
                                     child.frame, child.border_widths)
                else:
                    tw, lw, bw, rw = child.get_border_widths()

                    tl = (child.frame.left, child.frame.top)
                    tr = (child.frame.right - 1, child.frame.top)
                    bl = (child.frame.left, child.frame.bottom - 1)
                    br = (child.frame.right - 1, child.frame.bottom - 1)

                    if tw > 0:
                        pygame.draw.line(self.surface, child.border_color,
                                         tl, tr, tw)
                    if lw > 0:
                        pygame.draw.line(self.surface, child.border_color,
                                         tl, bl, lw)
                    if bw > 0:
                        pygame.draw.line(self.surface, child.border_color,
                                         bl, br, bw)
                    if rw > 0:
                        pygame.draw.line(self.surface, child.border_color,
                                         tr, br, rw)

    return True
<SYSTEM_TASK:> Return border width for each side top, left, bottom, right. <END_TASK> <USER_TASK:> Description: def get_border_widths(self): """Return border width for each side top, left, bottom, right."""
    if type(self.border_widths) is int:  # uniform size
        return [self.border_widths] * 4
    return self.border_widths
<SYSTEM_TASK:> Make the given theme current. <END_TASK> <USER_TASK:> Description: def use_theme(theme): """Make the given theme current. There are two included themes: light_theme, dark_theme. """
    global current
    current = theme

    import scene
    if scene.current is not None:
        scene.current.stylize()
<SYSTEM_TASK:> Set a single style value for a view class and state. <END_TASK> <USER_TASK:> Description: def set(self, class_name, state, key, value): """Set a single style value for a view class and state. class_name The name of the class to be styled; do not include the package name; e.g. 'Button'. state The name of the state to be stylized. One of the following: 'normal', 'focused', 'selected', 'disabled' is common. key The style attribute name; e.g. 'background_color'. value The value of the style attribute; colors are either a 3-tuple for RGB, a 4-tuple for RGBA, or a pair thereof for a linear gradient. """
    self._styles.setdefault(class_name, {}).setdefault(state, {})
    self._styles[class_name][state][key] = value
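For illustration, a self-contained sketch of the nested setdefault pattern the method relies on, independent of the Theme class:

# Styles are kept as _styles[class_name][state][key] = value.
_styles = {}

def set_style(class_name, state, key, value):
    _styles.setdefault(class_name, {}).setdefault(state, {})
    _styles[class_name][state][key] = value

set_style('Button', 'normal', 'background_color', (220, 220, 220))
set_style('Button', 'focused', 'background_color', (200, 200, 255, 255))
print(_styles['Button']['focused']['background_color'])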
<SYSTEM_TASK:> The style dict for a given class and state. <END_TASK> <USER_TASK:> Description: def get_dict_for_class(self, class_name, state=None, base_name='View'): """The style dict for a given class and state. This collects the style attributes from parent classes and the class of the given object and gives precedence to values thereof to the children. The state attribute of the view instance is taken as the current state if state is None. If the state is not 'normal' then the style definitions for the 'normal' state are mixed-in from the given state style definitions, giving precedence to the non-'normal' style definitions. """
    classes = []
    klass = class_name

    while True:
        classes.append(klass)

        if klass.__name__ == base_name:
            break

        klass = klass.__bases__[0]

    if state is None:
        state = 'normal'

    style = {}

    for klass in classes:
        class_name = klass.__name__

        try:
            state_styles = self._styles[class_name][state]
        except KeyError:
            state_styles = {}

        if state != 'normal':
            try:
                normal_styles = self._styles[class_name]['normal']
            except KeyError:
                normal_styles = {}

            state_styles = dict(chain(normal_styles.iteritems(),
                                      state_styles.iteritems()))

        style = dict(chain(state_styles.iteritems(),
                           style.iteritems()))

    return style
<SYSTEM_TASK:> Get a single style attribute value for the given class. <END_TASK> <USER_TASK:> Description: def get_value(self, class_name, attr, default_value=None, state='normal', base_name='View'): """Get a single style attribute value for the given class. """
    styles = self.get_dict_for_class(class_name, state, base_name)

    try:
        return styles[attr]
    except KeyError:
        return default_value
<SYSTEM_TASK:> Instantiates and returns a model field for FieldHistory.object_id. <END_TASK> <USER_TASK:> Description: def instantiate_object_id_field(object_id_class_or_tuple=models.TextField): """ Instantiates and returns a model field for FieldHistory.object_id. object_id_class_or_tuple may be either a Django model field class or a tuple of (model_field, kwargs), where kwargs is a dict passed to model_field's constructor. """
    if isinstance(object_id_class_or_tuple, (list, tuple)):
        object_id_class, object_id_kwargs = object_id_class_or_tuple
    else:
        object_id_class = object_id_class_or_tuple
        object_id_kwargs = {}

    if not issubclass(object_id_class, models.fields.Field):
        raise TypeError('settings.%s must be a Django model field or '
                        '(field, kwargs) tuple' % OBJECT_ID_TYPE_SETTING)
    if not isinstance(object_id_kwargs, dict):
        raise TypeError('settings.%s kwargs must be a dict'
                        % OBJECT_ID_TYPE_SETTING)

    return object_id_class(db_index=True, **object_id_kwargs)
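As an illustration of the two accepted forms, a hypothetical settings value could look like either of the following; CharField with max_length is just an example, and the actual setting name is whatever OBJECT_ID_TYPE_SETTING refers to:

# Hypothetical configuration values that instantiate_object_id_field accepts.
from django.db import models

OBJECT_ID_TYPE = models.TextField                         # a bare field class
OBJECT_ID_TYPE = (models.CharField, {'max_length': 36})   # a (field, kwargs) tuple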
<SYSTEM_TASK:> Report if the session key has expired. <END_TASK> <USER_TASK:> Description: def has_expired(self, lifetime, now=None): """Report if the session key has expired. :param lifetime: A :class:`datetime.timedelta` that specifies the maximum age this :class:`SessionID` should be checked against. :param now: If specified, use this :class:`~datetime.datetime` instance instead of :meth:`~datetime.datetime.utcnow()` as the current time. """
    now = now or datetime.utcnow()

    return now > self.created + lifetime
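A self-contained sketch of the same expiry check, using a stand-in object with a created attribute rather than the real SessionID class:

from datetime import datetime, timedelta

class _FakeSessionID(object):
    # Stand-in with the same 'created' attribute used by has_expired().
    def __init__(self, created):
        self.created = created

    def has_expired(self, lifetime, now=None):
        now = now or datetime.utcnow()
        return now > self.created + lifetime

sid = _FakeSessionID(created=datetime(2024, 1, 1, 12, 0, 0))
print(sid.has_expired(timedelta(hours=1), now=datetime(2024, 1, 1, 12, 30)))  # False
print(sid.has_expired(timedelta(hours=1), now=datetime(2024, 1, 1, 14, 0)))   # True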
<SYSTEM_TASK:> Unserializes from a string. <END_TASK> <USER_TASK:> Description: def unserialize(cls, string): """Unserializes from a string. :param string: A string created by :meth:`serialize`. """
    id_s, created_s = string.split('_')

    return cls(int(id_s, 16), datetime.utcfromtimestamp(int(created_s, 16)))
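The serialized form is two lowercase hex fields joined by an underscore: the session id and a Unix timestamp. A standalone parsing sketch, independent of the SessionID class:

from datetime import datetime

def _unserialize(string):
    # Same parsing rule as SessionID.unserialize: "<hex id>_<hex unix timestamp>".
    id_s, created_s = string.split('_')
    return int(id_s, 16), datetime.utcfromtimestamp(int(created_s, 16))

print(_unserialize('deadbeef_65a1c200'))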
<SYSTEM_TASK:> Destroys a session completely, by deleting all keys and removing it <END_TASK> <USER_TASK:> Description: def destroy(self): """Destroys a session completely, by deleting all keys and removing it from the internal store immediately. This allows removing a session for security reasons, e.g. a login stored in a session will cease to exist if the session is destroyed. """
    for k in list(self.keys()):
        del self[k]

    if getattr(self, 'sid_s', None):
        current_app.kvsession_store.delete(self.sid_s)
        self.sid_s = None

    self.modified = False
    self.new = False
<SYSTEM_TASK:> Generate a new session id for this session. <END_TASK> <USER_TASK:> Description: def regenerate(self): """Generate a new session id for this session. To avoid vulnerabilities through `session fixation attacks <http://en.wikipedia.org/wiki/Session_fixation>`_, this function can be called after an action like a login has taken place. The session will be copied over to a new session id and the old one removed. """
    self.modified = True

    if getattr(self, 'sid_s', None):
        # delete old session
        current_app.kvsession_store.delete(self.sid_s)

        # remove sid_s, set modified
        self.sid_s = None
        self.modified = True
<SYSTEM_TASK:> Removes all expired session from the store. <END_TASK> <USER_TASK:> Description: def cleanup_sessions(self, app=None): """Removes all expired session from the store. Periodically, this function can be called to remove sessions from the backend store that have expired, as they are not removed automatically unless the backend supports time-to-live and has been configured appropriately (see :class:`~simplekv.TimeToLiveMixin`). This function retrieves all session keys, checks they are older than :attr:`flask.Flask.permanent_session_lifetime` and if so, removes them. Note that no distinction is made between non-permanent and permanent sessions. :param app: The app whose sessions should be cleaned up. If ``None``, uses :py:data:`~flask.current_app`."""
    if not app:
        app = current_app

    for key in app.kvsession_store.keys():
        m = self.key_regex.match(key)
        now = datetime.utcnow()

        if m:
            # read id
            sid = SessionID.unserialize(key)

            # remove if expired
            if sid.has_expired(app.permanent_session_lifetime, now):
                app.kvsession_store.delete(key)
<SYSTEM_TASK:> Initialize application and KVSession. <END_TASK> <USER_TASK:> Description: def init_app(self, app, session_kvstore=None): """Initialize application and KVSession. This will replace the session management of the application with Flask-KVSession's. :param app: The :class:`~flask.Flask` app to be initialized."""
    app.config.setdefault('SESSION_KEY_BITS', 64)
    app.config.setdefault('SESSION_RANDOM_SOURCE', SystemRandom())

    if not session_kvstore and not self.default_kvstore:
        raise ValueError('Must supply session_kvstore either on '
                         'construction or init_app().')

    # set store on app, either use default or supplied argument
    app.kvsession_store = session_kvstore or self.default_kvstore
    app.session_interface = KVSessionInterface()
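A typical wiring sketch, assuming Flask and simplekv are installed; DictStore is the in-memory store from simplekv.memory and KVSessionExtension is the extension class this init_app belongs to:

from flask import Flask
from simplekv.memory import DictStore
from flask_kvsession import KVSessionExtension

app = Flask(__name__)
store = DictStore()  # in-memory store; use a persistent simplekv store in production

# Replaces app.session_interface with the KVSession implementation.
KVSessionExtension(store, app)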
<SYSTEM_TASK:> This is a utility function used in the certification modules to transfer <END_TASK> <USER_TASK:> Description: def transfer_config_dict(soap_object, data_dict): """ This is a utility function used in the certification modules to transfer the data dicts above to SOAP objects. This avoids repetition and allows us to store all of our variable configuration here rather than in each certification script. """
    for key, val in data_dict.items():
        # Transfer each key to the matching attribute on the SOAP object.
        setattr(soap_object, key, val)
<SYSTEM_TASK:> Adds a package to the ship request. <END_TASK> <USER_TASK:> Description: def add_package(self, package_item): """ Adds a package to the ship request. @type package_item: WSDL object, type of RequestedPackageLineItem WSDL object. @keyword package_item: A RequestedPackageLineItem, created by calling create_wsdl_object_of_type('RequestedPackageLineItem') on this ShipmentRequest object. See examples/create_shipment.py for more details. """
    self.RequestedShipment.RequestedPackageLineItems.append(package_item)

    package_weight = package_item.Weight.Value
    self.RequestedShipment.TotalWeight.Value += package_weight
    self.RequestedShipment.PackageCount += 1
<SYSTEM_TASK:> Preps the WSDL data structures for the user. <END_TASK> <USER_TASK:> Description: def _prepare_wsdl_objects(self): """ Preps the WSDL data structures for the user. """
    self.DeletionControlType = self.client.factory.create('DeletionControlType')
    self.TrackingId = self.client.factory.create('TrackingId')
    self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')
<SYSTEM_TASK:> Sets up the WebAuthenticationDetail node. This is required for all <END_TASK> <USER_TASK:> Description: def __set_web_authentication_detail(self): """ Sets up the WebAuthenticationDetail node. This is required for all requests. """
    # Start of the authentication stuff.
    web_authentication_credential = self.client.factory.create('WebAuthenticationCredential')
    web_authentication_credential.Key = self.config_obj.key
    web_authentication_credential.Password = self.config_obj.password

    # Encapsulates the auth credentials.
    web_authentication_detail = self.client.factory.create('WebAuthenticationDetail')
    web_authentication_detail.UserCredential = web_authentication_credential

    # Set Default ParentCredential
    if hasattr(web_authentication_detail, 'ParentCredential'):
        web_authentication_detail.ParentCredential = web_authentication_credential

    self.WebAuthenticationDetail = web_authentication_detail
<SYSTEM_TASK:> Sets up the ClientDetail node, which is required for all shipping <END_TASK> <USER_TASK:> Description: def __set_client_detail(self, *args, **kwargs): """ Sets up the ClientDetail node, which is required for all shipping related requests. """
    client_detail = self.client.factory.create('ClientDetail')
    client_detail.AccountNumber = self.config_obj.account_number
    client_detail.MeterNumber = self.config_obj.meter_number
    client_detail.IntegratorId = self.config_obj.integrator_id

    if hasattr(client_detail, 'Region'):
        client_detail.Region = self.config_obj.express_region_code

    client_language_code = kwargs.get('client_language_code', None)
    client_locale_code = kwargs.get('client_locale_code', None)

    if hasattr(client_detail, 'Localization') and (client_language_code or client_locale_code):
        localization = self.client.factory.create('Localization')

        if client_language_code:
            localization.LanguageCode = client_language_code

        if client_locale_code:
            localization.LocaleCode = client_locale_code

        client_detail.Localization = localization

    self.ClientDetail = client_detail
<SYSTEM_TASK:> Checks kwargs for 'customer_transaction_id' and sets it if present. <END_TASK> <USER_TASK:> Description: def __set_transaction_detail(self, *args, **kwargs): """ Checks kwargs for 'customer_transaction_id' and sets it if present. """
    customer_transaction_id = kwargs.get('customer_transaction_id', None)

    if customer_transaction_id:
        transaction_detail = self.client.factory.create('TransactionDetail')
        transaction_detail.CustomerTransactionId = customer_transaction_id
        self.logger.debug(transaction_detail)
        self.TransactionDetail = transaction_detail
<SYSTEM_TASK:> Pulls the versioning info for the request from the child request. <END_TASK> <USER_TASK:> Description: def __set_version_id(self): """ Pulls the versioning info for the request from the child request. """
    version_id = self.client.factory.create('VersionId')
    version_id.ServiceId = self._version_info['service_id']
    version_id.Major = self._version_info['major']
    version_id.Intermediate = self._version_info['intermediate']
    version_id.Minor = self._version_info['minor']
    self.logger.debug(version_id)
    self.VersionId = version_id
<SYSTEM_TASK:> This checks the response for general Fedex errors that aren't related <END_TASK> <USER_TASK:> Description: def __check_response_for_fedex_error(self): """ This checks the response for general Fedex errors that aren't related to any one WSDL. """
    if self.response.HighestSeverity == "FAILURE":
        for notification in self.response.Notifications:
            if notification.Severity == "FAILURE":
                raise FedexFailure(notification.Code, notification.Message)
<SYSTEM_TASK:> Override this in each service module to check for errors that are <END_TASK> <USER_TASK:> Description: def _check_response_for_request_errors(self): """ Override this in each service module to check for errors that are specific to that module. For example, invalid tracking numbers in a Tracking request. """
    if self.response.HighestSeverity == "ERROR":
        for notification in self.response.Notifications:
            if notification.Severity == "ERROR":
                raise FedexError(notification.Code, notification.Message)
<SYSTEM_TASK:> Sends the assembled request on the child object. <END_TASK> <USER_TASK:> Description: def send_request(self, send_function=None): """ Sends the assembled request on the child object. @type send_function: function reference @keyword send_function: A function reference (passed without the parenthesis) to a function that will send the request. This allows for overriding the default function in cases such as validation requests. """
    # Send the request and get the response back.
    try:
        # If the user has overridden the send function, use theirs
        # instead of the default.
        if send_function:
            # Follow the overridden function.
            self.response = send_function()
        else:
            # Default scenario, business as usual.
            self.response = self._assemble_and_send_request()
    except suds.WebFault as fault:
        # When this happens, throw an informative message reminding the
        # user to check all required variables, making sure they are
        # populated and valid.
        raise SchemaValidationError(fault.fault)

    # Check the response for general Fedex errors/failures that aren't
    # specific to any given WSDL/request.
    self.__check_response_for_fedex_error()

    # Check the response for errors specific to the particular request.
    # This method can be overridden by a method on the child class object.
    self._check_response_for_request_errors()

    # Check the response for warnings specific to the particular request.
    # This method can be overridden by a method on the child class object.
    self._check_response_for_request_warnings()

    # Debug output. (See Request and Response output)
    self.logger.debug("== FEDEX QUERY RESULT ==")
    self.logger.debug(self.response)
<SYSTEM_TASK:> This sets the package identifier information. This may be a tracking <END_TASK> <USER_TASK:> Description: def _prepare_wsdl_objects(self): """ This sets the package identifier information. This may be a tracking number or a few different things as per the Fedex spec. """
    self.SelectionDetails = self.client.factory.create('TrackSelectionDetail')

    # Default to Fedex
    self.SelectionDetails.CarrierCode = 'FDXE'

    track_package_id = self.client.factory.create('TrackPackageIdentifier')

    # Default to tracking number.
    track_package_id.Type = 'TRACKING_NUMBER_OR_DOORTAG'

    self.SelectionDetails.PackageIdentifier = track_package_id
<SYSTEM_TASK:> Prints all of a shipment's labels, or optionally just one. <END_TASK> <USER_TASK:> Description: def print_label(self, package_num=None): """ Prints all of a shipment's labels, or optionally just one. @type package_num: L{int} @param package_num: 0-based index of the package to print. This is only useful for shipments with more than one package. """
    # The index is 0-based, so explicitly test against None: package 0 is a
    # valid single-package selection.
    if package_num is not None:
        packages = [
            self.shipment.response.CompletedShipmentDetail.CompletedPackageDetails[package_num]
        ]
    else:
        packages = self.shipment.response.CompletedShipmentDetail.CompletedPackageDetails

    for package in packages:
        label_binary = binascii.a2b_base64(package.Label.Parts[0].Image)
        self._print_base64(label_binary)
<SYSTEM_TASK:> Pipe the binary directly to the label printer. Works under Linux <END_TASK> <USER_TASK:> Description: def _print_base64(self, base64_data): """ Pipe the binary directly to the label printer. Works under Linux without requiring PySerial. This is not typically something you should call directly, unless you have special needs. @type base64_data: L{str} @param base64_data: The base64 encoded string for the label to print. """
    label_file = open(self.device, "w")
    label_file.write(base64_data)
    label_file.close()
<SYSTEM_TASK:> Function to get potential tags for files using the file names. <END_TASK> <USER_TASK:> Description: def guess_tags(filename): """ Function to get potential tags for files using the file names. :param filename: This field is the name of file. """
    tags = []
    stripped_filename = strip_zip_suffix(filename)

    if stripped_filename.endswith('.vcf'):
        tags.append('vcf')
    if stripped_filename.endswith('.json'):
        tags.append('json')
    if stripped_filename.endswith('.csv'):
        tags.append('csv')

    return tags
<SYSTEM_TASK:> Collate local file info as preparation for Open Humans upload. <END_TASK> <USER_TASK:> Description: def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT): """ Collate local file info as preparation for Open Humans upload. Note: Files with filesize > max_bytes are not included in returned info. :param filedir: This field is the target directory to get files from. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. """
    file_data = {}
    logging.info('Characterizing files in {}'.format(filedir))

    for filename in os.listdir(filedir):
        filepath = os.path.join(filedir, filename)
        file_stats = os.stat(filepath)
        creation_date = arrow.get(file_stats.st_ctime).isoformat()
        file_size = file_stats.st_size

        if file_size <= max_bytes:
            file_md5 = hashlib.md5()
            with open(filepath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    file_md5.update(chunk)
            md5 = file_md5.hexdigest()

            file_data[filename] = {
                'tags': guess_tags(filename),
                'description': '',
                'md5': md5,
                'creation_date': creation_date,
            }

    return file_data
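The chunked-md5 idiom above keeps memory use constant for arbitrarily large files; here is that piece on its own, standard library only:

import hashlib

def md5_of_file(filepath, chunk_size=4096):
    # Hash the file in fixed-size chunks so memory use stays constant.
    file_md5 = hashlib.md5()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            file_md5.update(chunk)
    return file_md5.hexdigest()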
<SYSTEM_TASK:> Check that the files listed in metadata exactly match files in target dir. <END_TASK> <USER_TASK:> Description: def validate_metadata(target_dir, metadata): """ Check that the files listed in metadata exactly match files in target dir. :param target_dir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched. """
    if not os.path.isdir(target_dir):
        print("Error: " + target_dir + " is not a directory")
        return False

    file_list = os.listdir(target_dir)

    for filename in file_list:
        if filename not in metadata:
            print("Error: " + filename + " present at " + target_dir +
                  " not found in metadata file")
            return False

    for filename in metadata:
        if filename not in file_list:
            print("Error: " + filename + " present in metadata file" +
                  " not found on disk at: " + target_dir)
            return False

    return True
<SYSTEM_TASK:> Return the metadata as requested for a single user. <END_TASK> <USER_TASK:> Description: def load_metadata_csv_single_user(csv_in, header, tags_idx): """ Return the metadata as requested for a single user. :param csv_in: This field is the csv file to return metadata from. :param header: This field contains the headers in the csv file :param tags_idx: This field contains the index of the tags in the csv file. """
    metadata = {}
    n_headers = len(header)

    for index, row in enumerate(csv_in, 2):
        if row[0] == "":
            raise ValueError('Error: In row number ' + str(index) + ':' +
                             ' "filename" must not be empty.')

        # Stop at the terminating row ("None" filename, all other cells "NA").
        if row[0] == 'None' and all(x == 'NA' for x in row[1:]):
            break

        if len(row) != n_headers:
            raise ValueError('Error: In row number ' + str(index) + ':' +
                             ' Number of columns (' + str(len(row)) +
                             ") doesn't match number of headings (" +
                             str(n_headers) + ')')

        metadata[row[0]] = {
            header[i]: row[i] for i in range(1, len(header)) if i != tags_idx
        }
        metadata[row[0]]['tags'] = [t.strip() for t in
                                    row[tags_idx].split(',') if t.strip()]

    return metadata
<SYSTEM_TASK:> Return dict of metadata. <END_TASK> <USER_TASK:> Description: def load_metadata_csv(input_filepath): """ Return dict of metadata. Format is either dict (filenames are keys) or dict-of-dicts (project member IDs as top level keys, then filenames as keys). :param input_filepath: This field is the filepath of the csv file. """
    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)

        if 'tags' in header:
            tags_idx = header.index('tags')
        else:
            raise ValueError('"tags" is a compulsory column in metadata file.')

        if header[0] == 'project_member_id':
            if header[1] == 'filename':
                metadata = load_metadata_csv_multi_user(csv_in, header,
                                                        tags_idx)
            else:
                raise ValueError('The second column must be "filename"')
        elif header[0] == 'filename':
            metadata = load_metadata_csv_single_user(csv_in, header, tags_idx)
        else:
            raise ValueError('Incorrect Formatting of metadata. The first' +
                             ' column for single user upload should be' +
                             ' "filename". For multiuser uploads the first ' +
                             'column should be "project member id" and the' +
                             ' second column should be "filename"')

    return metadata
<SYSTEM_TASK:> Check if date is in ISO 8601 format. <END_TASK> <USER_TASK:> Description: def validate_date(date, project_member_id, filename): """ Check if date is in ISO 8601 format. :param date: This field is the date to be checked. :param project_member_id: This field is the project_member_id corresponding to the date provided. :param filename: This field is the filename corresponding to the date provided. """
    try:
        arrow.get(date)
    except Exception:
        return False

    return True
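A quick illustration of the check, assuming the arrow package is installed; any string arrow.get() cannot parse is reported as invalid:

import arrow

for value in ['2021-05-01', '2021-05-01T12:30:00+00:00', 'not a date']:
    try:
        arrow.get(value)
        print(value, '-> valid')
    except Exception:
        print(value, '-> invalid')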
<SYSTEM_TASK:> Check if metadata fields like project member id, description, tags, md5 and <END_TASK> <USER_TASK:> Description: def is_single_file_metadata_valid(file_metadata, project_member_id, filename): """ Check if metadata fields like project member id, description, tags, md5 and creation date are valid for a single file. :param file_metadata: This field is metadata of file. :param project_member_id: This field is the project member id corresponding to the file metadata provided. :param filename: This field is the filename corresponding to the file metadata provided. """
    if project_member_id is not None:
        if not project_member_id.isdigit() or len(project_member_id) != 8:
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' project member id must be of 8 digits from 0 to 9')

    if 'description' not in file_metadata:
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "description" is a required field of the metadata')

    if not isinstance(file_metadata['description'], str):
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "description" must be a string')

    if 'tags' not in file_metadata:
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "tags" is a required field of the metadata')

    if not isinstance(file_metadata['tags'], list):
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "tags" must be an array of strings')

    if 'creation_date' in file_metadata:
        if not validate_date(file_metadata['creation_date'],
                             project_member_id, filename):
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' Dates must be in ISO 8601 format')

    if 'md5' in file_metadata:
        if not re.match(r'[a-f0-9]{32}$', file_metadata['md5'],
                        flags=re.IGNORECASE):
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' Invalid MD5 specified')

    return True
<SYSTEM_TASK:> Check validity of metadata for single user. <END_TASK> <USER_TASK:> Description: def review_metadata_csv_single_user(filedir, metadata, csv_in, n_headers): """ Check validity of metadata for single user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field returns a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv. """
    try:
        if not validate_metadata(filedir, metadata):
            return False

        for filename, file_metadata in metadata.items():
            is_single_file_metadata_valid(file_metadata, None, filename)
    except ValueError as e:
        print_error(e)
        return False

    return True
<SYSTEM_TASK:> Check that all folders in the given directory have a corresponding <END_TASK> <USER_TASK:> Description: def validate_subfolders(filedir, metadata): """ Check that all folders in the given directory have a corresponding entry in the metadata file, and vice versa. :param filedir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched. """
    if not os.path.isdir(filedir):
        print("Error: " + filedir + " is not a directory")
        return False

    subfolders = os.listdir(filedir)

    for subfolder in subfolders:
        if subfolder not in metadata:
            print("Error: folder " + subfolder +
                  " present on disk but not in metadata")
            return False

    for subfolder in metadata:
        if subfolder not in subfolders:
            print("Error: folder " + subfolder +
                  " present in metadata but not on disk")
            return False

    return True
<SYSTEM_TASK:> Check validity of metadata for multi user. <END_TASK> <USER_TASK:> Description: def review_metadata_csv_multi_user(filedir, metadata, csv_in, n_headers): """ Check validity of metadata for multi user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field returns a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv. """
    try:
        if not validate_subfolders(filedir, metadata):
            return False

        for project_member_id, member_metadata in metadata.items():
            if not validate_metadata(os.path.join(filedir, project_member_id),
                                     member_metadata):
                return False
            for filename, file_metadata in member_metadata.items():
                is_single_file_metadata_valid(file_metadata,
                                              project_member_id, filename)
    except ValueError as e:
        print_error(e)
        return False

    return True
<SYSTEM_TASK:> Check validity of metadata fields. <END_TASK> <USER_TASK:> Description: def review_metadata_csv(filedir, input_filepath): """ Check validity of metadata fields. :param filedir: This field is the filepath of the directory whose csv has to be made. :param outputfilepath: This field is the file path of the output csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. """
    try:
        metadata = load_metadata_csv(input_filepath)
    except ValueError as e:
        print_error(e)
        return False

    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)
        n_headers = len(header)

        if header[0] == 'filename':
            res = review_metadata_csv_single_user(filedir, metadata,
                                                  csv_in, n_headers)
            return res

        if header[0] == 'project_member_id':
            res = review_metadata_csv_multi_user(filedir, metadata,
                                                 csv_in, n_headers)
            return res
<SYSTEM_TASK:> Make metadata file for all files in a directory. <END_TASK> <USER_TASK:> Description: def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT): """ Make metadata file for all files in a directory. :param filedir: This field is the filepath of the directory whose csv has to be made. :param outputfilepath: This field is the file path of the output csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. """
    with open(outputfilepath, 'w') as filestream:
        write_metadata_to_filestream(filedir, filestream, max_bytes)
<SYSTEM_TASK:> Get project member id from a file. <END_TASK> <USER_TASK:> Description: def read_id_list(filepath): """ Get project member id from a file. :param filepath: This field is the path of file to read. """
    if not filepath:
        return None

    id_list = []

    with open(filepath) as f:
        for line in f:
            line = line.rstrip()
            if not re.match('^[0-9]{8}$', line):
                # Raise a proper exception; raising a bare string is invalid.
                raise ValueError('Each line in whitelist or blacklist is '
                                 'expected to contain an eight digit ID, '
                                 'and nothing else.')
            else:
                id_list.append(line)

    return id_list
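A small sketch of the expected file format and the eight-digit rule it enforces (inlined here so it runs standalone; read_id_list itself reads from a file path):

import re

WHITELIST_EXAMPLE = "01234567\n12345678\n"

id_list = []
for line in WHITELIST_EXAMPLE.splitlines():
    line = line.rstrip()
    if not re.match('^[0-9]{8}$', line):
        raise ValueError('Each line must contain an eight digit ID and nothing else.')
    id_list.append(line)

print(id_list)  # ['01234567', '12345678']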
<SYSTEM_TASK:> Function for setting the logging level. <END_TASK> <USER_TASK:> Description: def set_log_level(debug, verbose): """ Function for setting the logging level. :param debug: This boolean field is the logging level. :param verbose: This boolean field is the logging level. """
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    elif verbose:
        logging.basicConfig(level=logging.INFO)
<SYSTEM_TASK:> Download data from project members to the target directory. <END_TASK> <USER_TASK:> Description: def download(directory, master_token=None, member=None, access_token=None, source=None, project_data=False, max_size='128m', verbose=False, debug=False, memberlist=None, excludelist=None, id_filename=False): """ Download data from project members to the target directory. Unless this is a member-specific download, directories will be created for each project member ID. Also, unless a source is specified, all shared sources are downloaded and data is sorted into subdirectories according to source. Projects can optionally return data to Open Humans member accounts. If project_data is True (or the "--project-data" flag is used), this data (the project's own data files, instead of data from other sources) will be downloaded for each member. :param directory: This field is the target directory to download data. :param master_token: This field is the master access token for the project. It's default value is None. :param member: This field is specific member whose project data is downloaded. It's default value is None. :param access_token: This field is the user specific access token. It's default value is None. :param source: This field is the data source. It's default value is None. :param project_data: This field is data related to particular project. It's default value is False. :param max_size: This field is the maximum file size. It's default value is 128m. :param verbose: This boolean field is the logging level. It's default value is False. :param debug: This boolean field is the logging level. It's default value is False. :param memberlist: This field is list of members whose data will be downloaded. It's default value is None. :param excludelist: This field is list of members whose data will be skipped. It's default value is None. """
    set_log_level(debug, verbose)

    if (memberlist or excludelist) and (member or access_token):
        raise UsageError('Please do not provide a memberlist or excludelist '
                         'when retrieving data for a single member.')

    memberlist = read_id_list(memberlist)
    excludelist = read_id_list(excludelist)

    if not (master_token or access_token) or (master_token and access_token):
        raise UsageError('Please specify either a master access token (-T), '
                         'or an OAuth2 user access token (-t).')

    if (source and project_data):
        raise UsageError("It doesn't make sense to use both 'source' and "
                         "'project-data' options!")

    if master_token:
        project = OHProject(master_access_token=master_token)
        if member:
            if project_data:
                project.download_member_project_data(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    max_size=max_size,
                    id_filename=id_filename)
            else:
                project.download_member_shared(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    source=source,
                    max_size=max_size,
                    id_filename=id_filename)
        else:
            project.download_all(target_dir=directory,
                                 source=source,
                                 max_size=max_size,
                                 memberlist=memberlist,
                                 excludelist=excludelist,
                                 project_data=project_data,
                                 id_filename=id_filename)
    else:
        member_data = exchange_oauth2_member(access_token, all_files=True)
        if project_data:
            OHProject.download_member_project_data(
                member_data=member_data,
                target_member_dir=directory,
                max_size=max_size,
                id_filename=id_filename)
        else:
            OHProject.download_member_shared(member_data=member_data,
                                             target_member_dir=directory,
                                             source=source,
                                             max_size=max_size,
                                             id_filename=id_filename)
<SYSTEM_TASK:> Output CSV with metadata for a project's downloadable files in Open Humans. <END_TASK> <USER_TASK:> Description: def download_metadata(master_token, output_csv, verbose=False, debug=False): """ Output CSV with metadata for a project's downloadable files in Open Humans. :param master_token: This field is the master access token for the project. :param output_csv: This field is the target csv file to which metadata is written. :param verbose: This boolean field is the logging level. It's default value is False. :param debug: This boolean field is the logging level. It's default value is False. """
    set_log_level(debug, verbose)

    project = OHProject(master_access_token=master_token)

    with open(output_csv, 'w') as f:
        csv_writer = csv.writer(f)
        header = ['project_member_id', 'data_source', 'file_basename',
                  'file_upload_date']
        csv_writer.writerow(header)

        for member_id in project.project_data:
            if not project.project_data[member_id]['data']:
                csv_writer.writerow([member_id, 'NA', 'None', 'NA'])
            else:
                for data_item in project.project_data[member_id]['data']:
                    logging.debug(data_item)
                    csv_writer.writerow([
                        member_id,
                        data_item['source'],
                        data_item['basename'].encode('utf-8'),
                        data_item['created']])
<SYSTEM_TASK:> Upload files for the project to Open Humans member accounts. <END_TASK> <USER_TASK:> Description: def upload(directory, metadata_csv, master_token=None, member=None, access_token=None, safe=False, sync=False, max_size='128m', mode='default', verbose=False, debug=False): """ Upload files for the project to Open Humans member accounts. If using a master access token and not specifying member ID: (1) Files should be organized in subdirectories according to project member ID, e.g.: main_directory/01234567/data.json main_directory/12345678/data.json main_directory/23456789/data.json (2) The metadata CSV should have the following format: 1st column: Project member ID 2nd column: filenames 3rd & additional columns: Metadata fields (see below) If uploading for a specific member: (1) The local directory should not contain subdirectories. (2) The metadata CSV should have the following format: 1st column: filenames 2nd & additional columns: Metadata fields (see below) The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. (Use --safe or --sync to change this behavior.) If included, the following metadata columns should be correctly formatted: 'tags': should be comma-separated strings 'md5': should match the file's md5 hexdigest 'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes Other metedata fields (e.g. 'description') can be arbitrary strings. Either specify sync as True or safe as True but not both. :param directory: This field is the target directory from which data will be uploaded. :param metadata_csv: This field is the filepath of the metadata csv file. :param master_token: This field is the master access token for the project. It's default value is None. :param member: This field is specific member whose project data is downloaded. It's default value is None. :param access_token: This field is the user specific access token. It's default value is None. :param safe: This boolean field will overwrite matching filename. It's default value is False. :param sync: This boolean field will delete files on Open Humans that are not in the local directory. It's default value is False. :param max_size: This field is the maximum file size. It's default value is None. :param mode: This field takes three value default, sync, safe. It's default value is 'default'. :param verbose: This boolean field is the logging level. It's default value is False. :param debug: This boolean field is the logging level. It's default value is False. """
    if safe and sync:
        raise UsageError('Safe (--safe) and sync (--sync) modes are mutually '
                         'incompatible!')

    if not (master_token or access_token) or (master_token and access_token):
        raise UsageError('Please specify either a master access token (-T), '
                         'or an OAuth2 user access token (-t).')

    set_log_level(debug, verbose)

    if sync:
        mode = 'sync'
    elif safe:
        mode = 'safe'

    metadata = load_metadata_csv(metadata_csv)

    subdirs = [i for i in os.listdir(directory) if
               os.path.isdir(os.path.join(directory, i))]

    if subdirs:
        if not all([re.match(r'^[0-9]{8}$', d) for d in subdirs]):
            raise UsageError(
                "Subdirs expected to match project member ID format!")
        if (master_token and member) or not master_token:
            raise UsageError(
                "Subdirs shouldn't exist if uploading for specific member!")

        project = OHProject(master_access_token=master_token)

        for member_id in subdirs:
            subdir_path = os.path.join(directory, member_id)
            project.upload_member_from_dir(
                member_data=project.project_data[member_id],
                target_member_dir=subdir_path,
                metadata=metadata[member_id],
                mode=mode,
                access_token=project.master_access_token,
            )
    else:
        if master_token and not (master_token and member):
            raise UsageError('No member specified!')

        if master_token:
            project = OHProject(master_access_token=master_token)
            project.upload_member_from_dir(
                member_data=project.project_data[member],
                target_member_dir=directory,
                metadata=metadata,
                mode=mode,
                access_token=project.master_access_token,
            )
        else:
            member_data = exchange_oauth2_member(access_token)
            OHProject.upload_member_from_dir(
                member_data=member_data,
                target_member_dir=directory,
                metadata=metadata,
                mode=mode,
                access_token=access_token,
            )
<SYSTEM_TASK:> Command line tools for downloading public data. <END_TASK> <USER_TASK:> Description: def public_data_download_cli(source, username, directory, max_size, quiet, debug): """ Command line tools for downloading public data. """
return public_download(source, username, directory, max_size, quiet, debug)
<SYSTEM_TASK:> Download public data from Open Humans. <END_TASK> <USER_TASK:> Description: def download(source=None, username=None, directory='.', max_size='128m', quiet=None, debug=None): """ Download public data from Open Humans. :param source: This field is the data source from which to download. Its default value is None. :param username: This field is the username of the user. Its default value is None. :param directory: This field is the target directory to which data is downloaded. :param max_size: This field is the maximum file size. Its default value is 128m. :param quiet: This field is the logging level. Its default value is None. :param debug: This field is the logging level. Its default value is None. """
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    elif quiet:
        logging.basicConfig(level=logging.ERROR)
    else:
        logging.basicConfig(level=logging.INFO)

    logging.debug("Running with source: '{}'".format(source) +
                  " and username: '{}'".format(username) +
                  " and directory: '{}'".format(directory) +
                  " and max-size: '{}'".format(max_size))

    signal.signal(signal.SIGINT, signal_handler_cb)

    max_bytes = parse_size(max_size)

    options = {}

    if source:
        options['source'] = source

    if username:
        options['username'] = username

    page = '{}?{}'.format(BASE_URL_API, urlencode(options))
    results = []
    counter = 1

    logging.info('Retrieving metadata')

    while True:
        logging.info('Retrieving page {}'.format(counter))
        response = get_page(page)
        results = results + response['results']
        if response['next']:
            page = response['next']
        else:
            break
        counter += 1

    logging.info('Downloading {} files'.format(len(results)))

    download_url_partial = partial(download_url, directory=directory,
                                   max_bytes=max_bytes)

    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        for value in executor.map(download_url_partial, results):
            if value:
                logging.info(value)
<SYSTEM_TASK:> Function returns which members have joined each activity. <END_TASK> <USER_TASK:> Description: def get_members_by_source(base_url=BASE_URL_API): """ Function returns which members have joined each activity. :param base_url: It is URL: `https://www.openhumans.org/api/public-data`. """
    url = '{}members-by-source/'.format(base_url)
    response = get_page(url)
    return response
<SYSTEM_TASK:> Function returns which activities each member has joined. <END_TASK> <USER_TASK:> Description: def get_sources_by_member(base_url=BASE_URL_API, limit=LIMIT_DEFAULT): """ Function returns which activities each member has joined. :param base_url: It is URL: `https://www.openhumans.org/api/public-data`. :param limit: It is the limit of data send by one request. """
    url = '{}sources-by-member/'.format(base_url)
    page = '{}?{}'.format(url, urlencode({'limit': limit}))
    results = []

    while True:
        data = get_page(page)
        results = results + data['results']
        if data['next']:
            page = data['next']
        else:
            break

    return results
<SYSTEM_TASK:> Helper function to get file data of member of a project. <END_TASK> <USER_TASK:> Description: def _get_member_file_data(member_data, id_filename=False): """ Helper function to get file data of member of a project. :param member_data: This field is data related to member in a project. """
    file_data = {}

    for datafile in member_data['data']:
        if id_filename:
            basename = '{}.{}'.format(datafile['id'], datafile['basename'])
        else:
            basename = datafile['basename']

        if (basename not in file_data or
                arrow.get(datafile['created']) >
                arrow.get(file_data[basename]['created'])):
            file_data[basename] = datafile

    return file_data
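When several files share a basename, the most recently created one wins; a standalone sketch of that keep-the-latest reduction, assuming the arrow package is installed:

import arrow

datafiles = [
    {'basename': 'data.json', 'created': '2021-01-01T00:00:00Z', 'id': 1},
    {'basename': 'data.json', 'created': '2021-06-01T00:00:00Z', 'id': 2},
]

file_data = {}
for datafile in datafiles:
    basename = datafile['basename']
    if (basename not in file_data or
            arrow.get(datafile['created']) >
            arrow.get(file_data[basename]['created'])):
        file_data[basename] = datafile

print(file_data['data.json']['id'])  # 2, the more recent upload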
<SYSTEM_TASK:> Returns data for all users including shared data files. <END_TASK> <USER_TASK:> Description: def update_data(self): """ Returns data for all users including shared data files. """
    url = ('https://www.openhumans.org/api/direct-sharing/project/'
           'members/?access_token={}'.format(self.master_access_token))
    results = get_all_results(url)
    self.project_data = dict()

    for result in results:
        self.project_data[result['project_member_id']] = result
        if len(result['data']) < result['file_count']:
            member_data = get_page(result['exchange_member'])
            final_data = member_data['data']
            while member_data['next']:
                member_data = get_page(member_data['next'])
                final_data = final_data + member_data['data']
            self.project_data[
                result['project_member_id']]['data'] = final_data

    return self.project_data
<SYSTEM_TASK:> Download files to sync a local dir to match OH member project data. <END_TASK> <USER_TASK:> Description: def download_member_project_data(cls, member_data, target_member_dir, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download files to sync a local dir to match OH member project data. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param max_size: This field is the maximum file size. It's default value is 128m. """
    logging.debug('Download member project data...')

    sources_shared = member_data['sources_shared']
    file_data = cls._get_member_file_data(member_data,
                                          id_filename=id_filename)

    for basename in file_data:
        # This is using a trick to identify a project's own data in an API
        # response, without knowing the project's identifier: if the data
        # isn't a shared data source, it must be the project's own data.
        if file_data[basename]['source'] in sources_shared:
            continue

        target_filepath = os.path.join(target_member_dir, basename)

        download_file(download_url=file_data[basename]['download_url'],
                      target_filepath=target_filepath,
                      max_bytes=parse_size(max_size))
<SYSTEM_TASK:> Download files to sync a local dir to match OH member shared data. <END_TASK> <USER_TASK:> Description: def download_member_shared(cls, member_data, target_member_dir, source=None, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download files to sync a local dir to match OH member shared data. Files are downloaded to match their "basename" on Open Humans. If there are multiple files with the same name, the most recent is downloaded. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param source: This field is the source from which to download data. :param max_size: This field is the maximum file size. It's default value is 128m. """
    logging.debug('Download member shared data...')

    sources_shared = member_data['sources_shared']
    file_data = cls._get_member_file_data(member_data,
                                          id_filename=id_filename)

    logging.info('Downloading member data to {}'.format(target_member_dir))

    for basename in file_data:
        # If not in sources shared, it's the project's own data. Skip.
        if file_data[basename]['source'] not in sources_shared:
            continue

        # Filter source if specified. Determine target directory for file.
        if source:
            if source == file_data[basename]['source']:
                target_filepath = os.path.join(target_member_dir, basename)
            else:
                continue
        else:
            source_data_dir = os.path.join(target_member_dir,
                                           file_data[basename]['source'])
            if not os.path.exists(source_data_dir):
                os.mkdir(source_data_dir)
            target_filepath = os.path.join(source_data_dir, basename)

        download_file(download_url=file_data[basename]['download_url'],
                      target_filepath=target_filepath,
                      max_bytes=parse_size(max_size))
<SYSTEM_TASK:> Download data for all users including shared data files. <END_TASK> <USER_TASK:> Description: def download_all(self, target_dir, source=None, project_data=False, memberlist=None, excludelist=None, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download data for all users including shared data files. :param target_dir: This field is the target directory to download data. :param source: This field is the data source. It's default value is None. :param project_data: This field is data related to particular project. It's default value is False. :param memberlist: This field is list of members whose data will be downloaded. It's default value is None. :param excludelist: This field is list of members whose data will be skipped. It's default value is None. :param max_size: This field is the maximum file size. It's default value is 128m. """
    members = self.project_data.keys()

    for member in members:
        if not (memberlist is None) and member not in memberlist:
            logging.debug('Skipping {}, not in memberlist'.format(member))
            continue

        if excludelist and member in excludelist:
            logging.debug('Skipping {}, in excludelist'.format(member))
            continue

        member_dir = os.path.join(target_dir, member)

        if not os.path.exists(member_dir):
            os.mkdir(member_dir)

        if project_data:
            self.download_member_project_data(
                member_data=self.project_data[member],
                target_member_dir=member_dir,
                max_size=max_size,
                id_filename=id_filename)
        else:
            self.download_member_shared(
                member_data=self.project_data[member],
                target_member_dir=member_dir,
                source=source,
                max_size=max_size,
                id_filename=id_filename)
<SYSTEM_TASK:> Upload files in target directory to an Open Humans member's account. <END_TASK> <USER_TASK:> Description: def upload_member_from_dir(member_data, target_member_dir, metadata, access_token, mode='default', max_size=MAX_SIZE_DEFAULT): """ Upload files in target directory to an Open Humans member's account. The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. If the 'mode' parameter is 'safe': matching filenames will not be overwritten. If the 'mode' parameter is 'sync': files on Open Humans that are not in the local directory will be deleted. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory from where data will be uploaded. :param metadata: This field is metadata for files to be uploaded. :param access_token: This field is user specific access token. :param mode: This field takes three value default, sync, safe. It's default value is 'default'. :param max_size: This field is the maximum file size. It's default value is 128m. """
    if not validate_metadata(target_member_dir, metadata):
        raise ValueError('Metadata should match directory contents!')

    project_data = {f['basename']: f for f in member_data['data'] if
                    f['source'] not in member_data['sources_shared']}

    for filename in metadata:
        if filename in project_data and mode == 'safe':
            logging.info('Skipping {}, remote exists with matching '
                         'name'.format(filename))
            continue

        filepath = os.path.join(target_member_dir, filename)
        remote_file_info = (project_data[filename] if filename in
                            project_data else None)
        upload_aws(target_filepath=filepath,
                   metadata=metadata[filename],
                   access_token=access_token,
                   project_member_id=member_data['project_member_id'],
                   remote_file_info=remote_file_info)

    if mode == 'sync':
        for filename in project_data:
            if filename not in metadata:
                logging.debug("Deleting {}".format(filename))
                delete_file(
                    file_basename=filename,
                    access_token=access_token,
                    project_member_id=member_data['project_member_id'])
<SYSTEM_TASK:> Get a single page of results. <END_TASK> <USER_TASK:> Description: def get_page(url): """ Get a single page of results. :param url: This field is the url from which data will be requested. """
    response = requests.get(url)
    handle_error(response, 200)
    data = response.json()
    return data
<SYSTEM_TASK:> Given starting API query for Open Humans, iterate to get all results. <END_TASK> <USER_TASK:> Description: def get_all_results(starting_page): """ Given starting API query for Open Humans, iterate to get all results. :param starting page: This field is the first page, starting from which results will be obtained. """
logging.info('Retrieving all results for {}'.format(starting_page)) page = starting_page results = [] while True: logging.debug('Getting data from: {}'.format(page)) data = get_page(page) logging.debug('JSON data: {}'.format(data)) results = results + data['results'] if data['next']: page = data['next'] else: break return results
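The pagination above is just a cursor loop over `next` links. The following self-contained sketch reproduces that loop against a made-up two-page response, so it runs without the Open Humans API; `fake_pages` and `fake_get_page` are illustrative stand-ins, not real endpoints.

```python
# Two fake pages keyed by "URL"; the second page has no 'next' link.
fake_pages = {
    'page1': {'results': [1, 2], 'next': 'page2'},
    'page2': {'results': [3], 'next': None},
}

def fake_get_page(url):
    # Stand-in for get_page(): returns the parsed JSON for one page.
    return fake_pages[url]

def collect_all(starting_page):
    page, results = starting_page, []
    while True:
        data = fake_get_page(page)
        results = results + data['results']
        if data['next']:
            page = data['next']
        else:
            break
    return results

print(collect_all('page1'))  # [1, 2, 3]
```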
<SYSTEM_TASK:> Returns data for a specific user, including shared data files. <END_TASK> <USER_TASK:> Description: def exchange_oauth2_member(access_token, base_url=OH_BASE_URL, all_files=False): """ Returns data for a specific user, including shared data files. :param access_token: This field is the user-specific access_token. :param base_url: This field is the base API URL. Its default value is `https://www.openhumans.org`. :param all_files: If True, iterate over paginated results to return all of the member's data file entries. Its default value is False. """
url = urlparse.urljoin( base_url, '/api/direct-sharing/project/exchange-member/?{}'.format( urlparse.urlencode({'access_token': access_token}))) member_data = get_page(url) returned = member_data.copy() # Get all file data if all_files is True. if all_files: while member_data['next']: member_data = get_page(member_data['next']) returned['data'] = returned['data'] + member_data['data'] logging.debug('JSON data: {}'.format(returned)) return returned
<SYSTEM_TASK:> Helper function to match the response of a request to the expected status <END_TASK> <USER_TASK:> Description: def handle_error(r, expected_code): """ Helper function to match the response of a request to the expected status code :param r: This field is the response of the request. :param expected_code: This field is the expected status code for the function. """
code = r.status_code if code != expected_code: info = 'API response status code {}'.format(code) try: if 'detail' in r.json(): info = info + ": {}".format(r.json()['detail']) elif 'metadata' in r.json(): info = info + ": {}".format(r.json()['metadata']) except json.decoder.JSONDecodeError: info = info + ":\n{}".format(r.content) raise Exception(info)
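As a rough usage sketch of the error handling above, the snippet below feeds `handle_error` a stand-in response object. It assumes `handle_error` as defined here is importable; `FakeResponse` is made up for illustration and only mimics the small part of a `requests.Response` that the helper touches.

```python
class FakeResponse:
    # A status code, raw content, and a JSON body -- everything handle_error reads.
    status_code = 404
    content = b'{"detail": "Not found."}'

    def json(self):
        return {'detail': 'Not found.'}

try:
    handle_error(FakeResponse(), 200)
except Exception as err:
    print(err)  # API response status code 404: Not found.
```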
<SYSTEM_TASK:> Return the wire packed version of `from_`. `pack_type` should be some <END_TASK> <USER_TASK:> Description: def pack_list(from_, pack_type): """ Return the wire packed version of `from_`. `pack_type` should be some subclass of `xcffib.Struct`, or a string that can be passed to `struct.pack`. You must pass `size` if `pack_type` is a struct.pack string. """
# We need from_ to not be empty if len(from_) == 0: return bytes() if pack_type == 'c': if isinstance(from_, bytes): # Catch Python 3 bytes and Python 2 strings # PY3 is "helpful" in that when you do tuple(b'foo') you get # (102, 111, 111) instead of something more reasonable like # (b'f', b'o', b'o'), so we rebuild from_ as a tuple of bytes from_ = [six.int2byte(b) for b in six.iterbytes(from_)] elif isinstance(from_, six.string_types): # Catch Python 3 strings and Python 2 unicode strings, both of # which we encode to bytes as utf-8 # Here we create the tuple of bytes from the encoded string from_ = [six.int2byte(b) for b in bytearray(from_, 'utf-8')] elif isinstance(from_[0], six.integer_types): # Pack from_ as char array, where from_ may be an array of ints # possibly greater than 256 def to_bytes(v): for _ in range(4): v, r = divmod(v, 256) yield r from_ = [six.int2byte(b) for i in from_ for b in to_bytes(i)] if isinstance(pack_type, six.string_types): return struct.pack("=" + pack_type * len(from_), *from_) else: buf = six.BytesIO() for item in from_: # If we can't pack it, you'd better have packed it yourself. But # let's not confuse things which aren't our Probobjs for packable # things. if isinstance(item, Protobj) and hasattr(item, "pack"): buf.write(item.pack()) else: buf.write(item) return buf.getvalue()
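For the `'c'` pack type, the behaviour amounts to splitting a value into single bytes and packing one `'c'` per byte. A minimal Python 3 sketch of that idea, independent of xcffib:

```python
import struct

# Break a text value into single bytes, then pack it with one 'c' per byte,
# mirroring what pack_list does for the 'c' pack type.
value = "foo"
as_bytes = [bytes([b]) for b in value.encode('utf-8')]
packed = struct.pack("=" + "c" * len(as_bytes), *as_bytes)
print(packed)  # b'foo'
```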
<SYSTEM_TASK:> Check that the connection is valid both before and <END_TASK> <USER_TASK:> Description: def ensure_connected(f): """ Check that the connection is valid both before and after the function is invoked. """
@functools.wraps(f) def wrapper(*args): self = args[0] self.invalid() try: return f(*args) finally: self.invalid() return wrapper
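The decorator runs the connection check both before the wrapped call and in a `finally` clause afterwards, so the check still happens when the body raises. A toy illustration of that pattern (the `Conn` class and its counter are made up for the example):

```python
import functools

def checked(f):
    @functools.wraps(f)
    def wrapper(self, *args):
        self.checks += 1          # "before" check
        try:
            return f(self, *args)
        finally:
            self.checks += 1      # "after" check, runs even on exceptions
    return wrapper

class Conn:
    def __init__(self):
        self.checks = 0

    @checked
    def ping(self):
        return 'pong'

c = Conn()
print(c.ping(), c.checks)  # pong 2
```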
<SYSTEM_TASK:> Returns the xcb_screen_t for every screen <END_TASK> <USER_TASK:> Description: def get_screen_pointers(self): """ Returns the xcb_screen_t for every screen, which is useful for other bindings. """
root_iter = lib.xcb_setup_roots_iterator(self._setup) screens = [root_iter.data] for i in range(self._setup.roots_len - 1): lib.xcb_screen_next(ffi.addressof((root_iter))) screens.append(root_iter.data) return screens
<SYSTEM_TASK:> Hoist an xcb_generic_event_t to the right xcffib structure. <END_TASK> <USER_TASK:> Description: def hoist_event(self, e): """ Hoist an xcb_generic_event_t to the right xcffib structure. """
if e.response_type == 0: return self._process_error(ffi.cast("xcb_generic_error_t *", e)) # We mask off the high bit here because events sent with SendEvent have # this bit set. We don't actually care where the event came from, so we # just throw this away. Maybe we could expose this, if anyone actually # cares about it. event = self._event_offsets[e.response_type & 0x7f] buf = CffiUnpacker(e) return event(buf)
<SYSTEM_TASK:> Greedy serialization requires the value to either be a column <END_TASK> <USER_TASK:> Description: def serialize(self, value, greedy=True): """ Greedy serialization requires the value to either be a column or convertible to a column, whereas non-greedy serialization will pass through any string as-is and will only serialize Column objects. Non-greedy serialization is useful when preparing queries with custom filters or segments. """
if greedy and not isinstance(value, Column): value = self.normalize(value) if isinstance(value, Column): return value.id else: return value
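A toy sketch of the greedy versus non-greedy distinction, using made-up `ToyColumn` and `MiniColumns` classes rather than the real API:

```python
class ToyColumn(object):
    def __init__(self, id):
        self.id = id

class MiniColumns(object):
    """Mimics the serialize behaviour described above on a tiny vocabulary."""
    def __init__(self, known):
        self.known = known

    def normalize(self, value):
        # Look the string up; unknown strings pass through unchanged.
        return self.known.get(value, value)

    def serialize(self, value, greedy=True):
        if greedy and not isinstance(value, ToyColumn):
            value = self.normalize(value)
        if isinstance(value, ToyColumn):
            return value.id
        return value

cols = MiniColumns({'pageviews': ToyColumn('ga:pageviews')})
print(cols.serialize('pageviews'))                        # ga:pageviews
print(cols.serialize('ga:source==google', greedy=False))  # passed through as-is
```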
<SYSTEM_TASK:> Generate a query by describing it as a series of actions <END_TASK> <USER_TASK:> Description: def describe(profile, description): """ Generate a query by describing it as a series of actions and parameters to those actions. These map directly to Query methods and arguments to those methods. This is an alternative to the chaining interface. Mostly useful if you'd like to put your queries in a file, rather than in Python code. """
api_type = description.pop('type', 'core') api = getattr(profile, api_type) return refine(api.query, description)
<SYSTEM_TASK:> Refine a query from a dictionary of parameters that describes it. <END_TASK> <USER_TASK:> Description: def refine(query, description): """ Refine a query from a dictionary of parameters that describes it. See `describe` for more information. """
for name, arguments in description.items():
    if hasattr(query, name):
        attribute = getattr(query, name)
    else:
        raise ValueError("Unknown query method: " + name)

    # query descriptions are often automatically generated, and
    # may include empty calls, which we skip
    if utils.isempty(arguments):
        continue

    if callable(attribute):
        method = attribute
        if isinstance(arguments, dict):
            query = method(**arguments)
        elif isinstance(arguments, list):
            query = method(*arguments)
        else:
            query = method(arguments)
    else:
        # not callable: treat it as a plain attribute and assign directly
        setattr(query, name, arguments)

return query
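A minimal, self-contained sketch of how a description dictionary drives a chainable query; `ToyQuery` and `toy_refine` are simplified stand-ins for the real classes, supporting only two methods:

```python
class ToyQuery(object):
    def __init__(self, calls=()):
        self.calls = list(calls)

    def metrics(self, *names):
        return ToyQuery(self.calls + [('metrics', names)])

    def range(self, start=None, stop=None):
        return ToyQuery(self.calls + [('range', (start, stop))])

def toy_refine(query, description):
    # Dict values map to positional or keyword arguments, like refine() above.
    for attribute, arguments in description.items():
        method = getattr(query, attribute)
        if isinstance(arguments, dict):
            query = method(**arguments)
        elif isinstance(arguments, list):
            query = method(*arguments)
        else:
            query = method(arguments)
    return query

q = toy_refine(ToyQuery(), {
    'metrics': ['pageviews'],
    'range': {'start': '2014-01-01', 'stop': '2014-01-31'},
})
print(q.calls)  # [('metrics', ('pageviews',)), ('range', ('2014-01-01', '2014-01-31'))]
```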
<SYSTEM_TASK:> `set` is a way to add raw properties to the request, <END_TASK> <USER_TASK:> Description: def set(self, key=None, value=None, **kwargs): """ `set` is a way to add raw properties to the request, for features that this module does not support or supports incompletely. For convenience's sake, it will serialize Column objects but will leave any other kind of value alone. """
serialize = partial(self.api.columns.serialize, greedy=False) if key and value: self.raw[key] = serialize(value) elif key or kwargs: properties = key or kwargs for key, value in properties.items(): self.raw[key] = serialize(value) else: raise ValueError( "Query#set requires a key and value, a properties dictionary or keyword arguments.") return self
<SYSTEM_TASK:> A human-readable description of the metrics this query will ask for. <END_TASK> <USER_TASK:> Description: def description(self): """ A human-readable description of the metrics this query will ask for. """
if 'metrics' in self.raw: metrics = self.raw['metrics'] head = metrics[0:-1] or metrics[0:1] text = ", ".join(head) if len(metrics) > 1: tail = metrics[-1] text = text + " and " + tail else: text = 'n/a' return text
<SYSTEM_TASK:> Return a new query which will produce results sorted by <END_TASK> <USER_TASK:> Description: def sort(self, *columns, **options): """ Return a new query which will produce results sorted by one or more metrics or dimensions. You may use plain strings for the columns, or actual `Column`, `Metric` and `Dimension` objects. Add a minus in front of the metric (either the string or the object) to sort in descending order. ```python # sort using strings query.sort('pageviews', '-device type') # alternatively, ask for a descending sort in a keyword argument query.sort('pageviews', descending=True) # sort using metric, dimension or column objects pageviews = profile.core.metrics['pageviews'] query.sort(-pageviews) ``` """
sorts = self.meta.setdefault('sort', [])

for column in columns:
    if isinstance(column, Column):
        # for Column objects, sort direction comes from the keyword flag
        descending = options.get('descending', False)
        identifier = column.id
    elif isinstance(column, utils.basestring):
        descending = column.startswith('-') or options.get('descending', False)
        identifier = self.api.columns[column.lstrip('-')].id
    else:
        raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))

    if descending:
        sign = '-'
    else:
        sign = ''

    sorts.append(sign + identifier)

self.raw['sort'] = ",".join(sorts)

return self
<SYSTEM_TASK:> Most of the actual functionality lives on the Column <END_TASK> <USER_TASK:> Description: def filter(self, value=None, exclude=False, **selection): """ Most of the actual functionality lives on the Column object and the `all` and `any` functions. """
filters = self.meta.setdefault('filters', []) if value and len(selection): raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.") elif value: value = [value] elif len(selection): value = select(self.api.columns, selection, invert=exclude) filters.append(value) self.raw['filters'] = utils.paste(filters, ',', ';') return self
<SYSTEM_TASK:> Return a new query that fetches metrics within a certain date range. <END_TASK> <USER_TASK:> Description: def range(self, start=None, stop=None, months=0, days=0): """ Return a new query that fetches metrics within a certain date range. ```python query.range('2014-01-01', '2014-06-30') ``` If you don't specify a `stop` argument, the date range will end today. If instead you meant to fetch just a single day's results, try: ```python query.range('2014-01-01', days=1) ``` More generally, you can specify that you'd like a certain number of days, starting from a certain date: ```python query.range('2014-01-01', months=3) query.range('2014-01-01', days=28) ``` Note that if you don't specify a granularity (either through the `interval` method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly` shortcut methods) you will get only a single result, encompassing the entire date range, per metric. **Note:** it is currently not possible to easily specify that you'd like to query the last full week(s), month(s) et cetera. This will be added sometime in the future. """
start, stop = utils.date.range(start, stop, months, days) self.raw.update({ 'start_date': start, 'end_date': stop, }) return self
<SYSTEM_TASK:> Return a new query, limited to a segment of all users or sessions. <END_TASK> <USER_TASK:> Description: def segment(self, value=None, scope=None, metric_scope=None, **selection): """ Return a new query, limited to a segment of all users or sessions. Accepts segment objects, filtered segment objects and segment names: ```python query.segment(account.segments['browser']) query.segment('browser') query.segment(account.segments['browser'].any('Chrome', 'Firefox')) ``` Segment can also accept a segment expression when you pass in a `type` argument. The type argument can be either `users` or `sessions`. This is pretty close to the metal. ```python # will be translated into `users::condition::perUser::ga:sessions>10` query.segment('condition::perUser::ga:sessions>10', type='users') ``` See the [Google Analytics dynamic segments documentation][segments] You can also use the `any`, `all`, `followed_by` and `immediately_followed_by` functions in this module to chain together segments. Everything about how segments get handled is still in flux. Feel free to propose ideas for a nicer interface on the [GitHub issues page][issues] [segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference [issues]: https://github.com/debrouwere/google-analytics/issues """
""" Technical note to self about segments: * users or sessions * sequence or condition * scope (perHit, perSession, perUser -- gte primary scope) Multiple conditions can be ANDed or ORed together; these two are equivalent users::condition::ga:revenue>10;ga:sessionDuration>60 users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60 For sequences, prepending ^ means the first part of the sequence has to match the first session/hit/... * users and sessions conditions can be combined (but only with AND) * sequences and conditions can also be combined (but only with AND) sessions::sequence::ga:browser==Chrome; condition::perHit::ga:timeOnPage>5 ->> ga:deviceCategory==mobile;ga:revenue>10; users::sequence::ga:deviceCategory==desktop ->> ga:deviceCategory=mobile; ga:revenue>100; condition::ga:browser==Chrome Problem: keyword arguments are passed as a dictionary, not an ordered dictionary! So e.g. this is risky query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True) """ SCOPES = { 'hits': 'perHit', 'sessions': 'perSession', 'users': 'perUser', } segments = self.meta.setdefault('segments', []) if value and len(selection): raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.") elif value: value = [self.api.segments.serialize(value)] elif len(selection): if not scope: raise ValueError("Scope is required. Choose from: users, sessions.") if metric_scope: metric_scope = SCOPES[metric_scope] value = select(self.api.columns, selection) value = [[scope, 'condition', metric_scope, condition] for condition in value] value = ['::'.join(filter(None, condition)) for condition in value] segments.append(value) self.raw['segment'] = utils.paste(segments, ',', ';') return self
<SYSTEM_TASK:> Return a new query with a modified `start_index`. <END_TASK> <USER_TASK:> Description: def next(self): """ Return a new query with a modified `start_index`. Mainly used internally to paginate through results. """
step = self.raw.get('max_results', 1000) start = self.raw.get('start_index', 1) + step self.raw['start_index'] = start return self
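The paging arithmetic is simple: the next request starts one window past the current `start_index`. A tiny sketch with illustrative numbers:

```python
# Mirror the start_index bookkeeping done by next().
raw = {'max_results': 1000, 'start_index': 1}
step = raw.get('max_results', 1000)
raw['start_index'] = raw.get('start_index', 1) + step
print(raw['start_index'])  # 1001 -- the next page begins after the first 1000 rows
```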
<SYSTEM_TASK:> Run the query and return a `Report`. <END_TASK> <USER_TASK:> Description: def get(self): """ Run the query and return a `Report`. This method transparently handles paginated results, so even for results that are larger than the maximum amount of rows the Google Analytics API will return in a single request, or larger than the amount of rows as specified through `CoreQuery#step`, `get` will leaf through all pages, concatenate the results and produce a single Report instance. """
cursor = self report = None is_complete = False is_enough = False while not (is_enough or is_complete): chunk = cursor.execute() if report: report.append(chunk.raw[0], cursor) else: report = chunk is_enough = len(report.rows) >= self.meta.get('limit', float('inf')) is_complete = chunk.is_complete cursor = cursor.next() return report
<SYSTEM_TASK:> Valid credentials are not necessarily correct, but <END_TASK> <USER_TASK:> Description: def valid(self): """ Valid credentials are not necessarily correct, but they contain all necessary information for an authentication attempt. """
two_legged = self.client_email and self.private_key three_legged = self.client_id and self.client_secret return two_legged or three_legged or False
<SYSTEM_TASK:> Complete credentials are valid and are either two-legged or include a token. <END_TASK> <USER_TASK:> Description: def complete(self): """ Complete credentials are valid and are either two-legged or include a token. """
return self.valid and (self.access_token or self.refresh_token or self.type == 2)
<SYSTEM_TASK:> Given a client id, client secret and either an access token or a refresh token, <END_TASK> <USER_TASK:> Description: def revoke(client_id, client_secret, client_email=None, private_key=None, access_token=None, refresh_token=None, identity=None, prefix=None, suffix=None): """ Given a client id, client secret and either an access token or a refresh token, revoke OAuth access to the Google Analytics data and remove any stored credentials that use these tokens. """
if client_email and private_key: raise ValueError('Two-legged OAuth does not use revokable tokens.') credentials = oauth.Credentials.find( complete=True, interactive=False, identity=identity, client_id=client_id, client_secret=client_secret, access_token=access_token, refresh_token=refresh_token, prefix=prefix, suffix=suffix, ) retval = credentials.revoke() keyring.delete(credentials.identity) return retval
<SYSTEM_TASK:> Allows a method to accept one or more values, <END_TASK> <USER_TASK:> Description: def vectorize(fn): """ Allows a method to accept one or more values, but internally deal only with a single item, and returning a list or a single item depending on what is desired. """
@functools.wraps(fn) def vectorized_method(self, values, *vargs, **kwargs): wrap = not isinstance(values, (list, tuple)) should_unwrap = not kwargs.setdefault('wrap', False) unwrap = wrap and should_unwrap del kwargs['wrap'] if wrap: values = [values] results = [fn(self, value, *vargs, **kwargs) for value in values] if unwrap: results = results[0] return results return vectorized_method
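A hedged usage sketch of the decorator above, assuming `vectorize` is importable from this module; `Normalizer` is a made-up class for illustration:

```python
class Normalizer(object):
    @vectorize
    def lower(self, value):
        # The decorated body only ever sees a single item.
        return value.lower()

n = Normalizer()
print(n.lower('Pageviews'))            # 'pageviews' -- single item in, single item out
print(n.lower(['Sessions', 'Users']))  # ['sessions', 'users'] -- list in, list out
```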
<SYSTEM_TASK:> A list of all web properties on this account. You may <END_TASK> <USER_TASK:> Description: def webproperties(self): """ A list of all web properties on this account. You may select a specific web property using its name, its id or an index. ```python account.webproperties[0] account.webproperties['UA-9234823-5'] account.webproperties['debrouwere.org'] ``` """
raw_properties = self.service.management().webproperties().list( accountId=self.id).execute()['items'] _webproperties = [WebProperty(raw, self) for raw in raw_properties] return addressable.List(_webproperties, indices=['id', 'name'], insensitive=True)
<SYSTEM_TASK:> A list of all profiles on this web property. You may <END_TASK> <USER_TASK:> Description: def profiles(self): """ A list of all profiles on this web property. You may select a specific profile using its name, its id or an index. ```python property.profiles[0] property.profiles['9234823'] property.profiles['marketing profile'] ``` """
raw_profiles = self.account.service.management().profiles().list( accountId=self.account.id, webPropertyId=self.id).execute()['items'] profiles = [Profile(raw, self) for raw in raw_profiles] return addressable.List(profiles, indices=['id', 'name'], insensitive=True)
<SYSTEM_TASK:> Run command with arguments and return its output as a byte string. <END_TASK> <USER_TASK:> Description: def check_output_input(*popenargs, **kwargs): """Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' There is an additional optional argument, "input", allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's "stdin" argument, as it too will be used internally. Example: >>> check_output(["sed", "-e", "s/foo/bar/"], ... input=b"when in the course of fooman events\n") b'when in the course of barman events\n' If universal_newlines=True is passed, the return value will be a string rather than bytes. """
if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') if 'input' in kwargs: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') inputdata = kwargs['input'] del kwargs['input'] kwargs['stdin'] = PIPE else: inputdata = None process = Popen(*popenargs, stdout=PIPE, **kwargs) try: output, unused_err = process.communicate(inputdata) except: process.kill() process.wait() raise retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd, output=output) return output
<SYSTEM_TASK:> If `apply_encoding_options` is inadequate, one can retrieve tokens from `self.token_counts`, filter with <END_TASK> <USER_TASK:> Description: def create_token_indices(self, tokens): """If `apply_encoding_options` is inadequate, one can retrieve tokens from `self.token_counts`, filter with a desired strategy and regenerate `token_index` using this method. The token index is subsequently used when `encode_texts` or `decode_texts` methods are called. """
start_index = len(self.special_token) indices = list(range(len(tokens) + start_index)) # prepend because the special tokens come in the beginning tokens_with_special = self.special_token + list(tokens) self._token2idx = dict(list(zip(tokens_with_special, indices))) self._idx2token = dict(list(zip(indices, tokens_with_special)))
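The resulting layout always reserves the lowest indices for special tokens, with regular tokens numbered after them. A small sketch of the mapping, assuming `special_token` is something like `['<PAD>', '<UNK>']` (the exact contents are an assumption for illustration):

```python
special_token = ['<PAD>', '<UNK>']   # assumed contents, for illustration only
tokens = ['the', 'cat']

# Special tokens are prepended, so regular tokens start after them.
tokens_with_special = special_token + tokens
token2idx = {tok: i for i, tok in enumerate(tokens_with_special)}
print(token2idx)  # {'<PAD>': 0, '<UNK>': 1, 'the': 2, 'cat': 3}
```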
<SYSTEM_TASK:> Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to <END_TASK> <USER_TASK:> Description: def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None): """Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to play with different settings without having to re-run tokenization on the entire corpus. Args: min_token_count: The minimum token count (frequency) in order to include during encoding. All tokens below this frequency will be encoded to `0` which corresponds to the unknown token. (Default value = 1) limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens` tokens will be kept. Set to None to keep everything. (Default value: None) """
if not self.has_vocab:
    raise ValueError("You need to build the vocabulary using `build_vocab` "
                     "before using `apply_encoding_options`")
if min_token_count < 1:
    raise ValueError("`min_token_count` should at least be 1")

# Remove tokens with freq < min_token_count
token_counts = list(self._token_counts.items())
token_counts = [x for x in token_counts if x[1] >= min_token_count]

# Clip to max_tokens.
if limit_top_tokens is not None:
    token_counts.sort(key=lambda x: x[1], reverse=True)
    filtered_tokens = list(zip(*token_counts))[0]
    filtered_tokens = filtered_tokens[:limit_top_tokens]
else:
    # list() is needed on Python 3, where zip returns an iterator.
    filtered_tokens = list(zip(*token_counts))[0]

# Generate indices based on filtered tokens.
self.create_token_indices(filtered_tokens)
<SYSTEM_TASK:> Encodes the given texts using internal vocabulary with optionally applied encoding options. See <END_TASK> <USER_TASK:> Description: def encode_texts(self, texts, unknown_token="<UNK>", verbose=1, **kwargs): """Encodes the given texts using internal vocabulary with optionally applied encoding options. See `apply_encoding_options` to set various options. Args: texts: The list of text items to encode. unknown_token: The token to replace words that are out of vocabulary. If None, those words are omitted. verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1) **kwargs: The kwargs for `token_generator`. Returns: The encoded texts. """
if not self.has_vocab:
    raise ValueError(
        "You need to build the vocabulary using `build_vocab` before using `encode_texts`")

if unknown_token and unknown_token not in self.special_token:
    raise ValueError(
        "Your special token (" + unknown_token + ") to replace unknown words is not in the list of special tokens: " + str(self.special_token))

progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
encoded_texts = []
for token_data in self.token_generator(texts, **kwargs):
    indices, token = token_data[:-1], token_data[-1]

    token_idx = self._token2idx.get(token)
    if token_idx is None and unknown_token:
        token_idx = self.special_token.index(unknown_token)

    if token_idx is not None:
        utils._append(encoded_texts, indices, token_idx)

    # Update progressbar per document level.
    progbar.update(indices[0])

# All done. Finalize progressbar.
progbar.update(len(texts))
return encoded_texts
<SYSTEM_TASK:> Decodes the texts using internal vocabulary. The list structure is maintained. <END_TASK> <USER_TASK:> Description: def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True): """Decodes the texts using internal vocabulary. The list structure is maintained. Args: encoded_texts: The list of texts to decode. unknown_token: The placeholder value for unknown token. (Default value: "<UNK>") inplace: True to make changes inplace. (Default value: True) Returns: The decoded texts. """
if len(self._token2idx) == 0: raise ValueError( "You need to build vocabulary using `build_vocab` before using `decode_texts`") if not isinstance(encoded_texts, list): # assume it's a numpy array encoded_texts = encoded_texts.tolist() if not inplace: encoded_texts = deepcopy(encoded_texts) utils._recursive_apply(encoded_texts, lambda token_id: self._idx2token.get(token_id) or unknown_token) return encoded_texts
<SYSTEM_TASK:> Builds the internal vocabulary and computes various statistics. <END_TASK> <USER_TASK:> Description: def build_vocab(self, texts, verbose=1, **kwargs): """Builds the internal vocabulary and computes various statistics. Args: texts: The list of text items to encode. verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1) **kwargs: The kwargs for `token_generator`. """
if self.has_vocab: logger.warn( "Tokenizer already has existing vocabulary. Overriding and building new vocabulary.") progbar = Progbar(len(texts), verbose=verbose, interval=0.25) count_tracker = utils._CountTracker() self._token_counts.clear() self._num_texts = len(texts) for token_data in self.token_generator(texts, **kwargs): indices, token = token_data[:-1], token_data[-1] count_tracker.update(indices) self._token_counts[token] += 1 # Update progressbar per document level. progbar.update(indices[0]) # Generate token2idx and idx2token. self.create_token_indices(self._token_counts.keys()) # All done. Finalize progressbar update and count tracker. count_tracker.finalize() self._counts = count_tracker.counts progbar.update(len(texts))
<SYSTEM_TASK:> Builds an embedding matrix for all words in vocab using embeddings_index <END_TASK> <USER_TASK:> Description: def build_embedding_weights(word_index, embeddings_index): """Builds an embedding matrix for all words in vocab using embeddings_index """
logger.info('Loading embeddings for all words in the corpus') embedding_dim = list(embeddings_index.values())[0].shape[-1] # setting special tokens such as UNK and PAD to 0 # all other words are also set to 0. embedding_weights = np.zeros((len(word_index), embedding_dim)) for word, i in word_index.items(): word_vector = embeddings_index.get(word) if word_vector is not None: embedding_weights[i] = word_vector return embedding_weights
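A self-contained numpy sketch of the same lookup, using a made-up four-word vocabulary and 3-dimensional vectors:

```python
import numpy as np

word_index = {'<PAD>': 0, 'cat': 1, 'dog': 2, 'xyzzy': 3}
embeddings_index = {'cat': np.ones(3), 'dog': np.full(3, 2.0)}

embedding_dim = 3
weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
    vector = embeddings_index.get(word)
    if vector is not None:
        weights[i] = vector  # known words get their pretrained vector

print(weights)  # rows 0 and 3 stay zero: padding and out-of-vocabulary words
```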
<SYSTEM_TASK:> Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed. <END_TASK> <USER_TASK:> Description: def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True): """Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed. Args: embedding_type: The embedding type to load. embedding_dims: The embedding dimensions, used when building the index from the embedding file. (Default value: None) embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified. cache: Whether to keep the loaded index in an in-memory cache for reuse. (Default value: True) Returns: The embeddings indexed by word. """
if embedding_path is not None: embedding_type = embedding_path # identify embedding by path embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type) if embeddings_index is not None: return embeddings_index if embedding_path is None: embedding_type_obj = get_embedding_type(embedding_type) # some very rough wrangling of zip files with the keras util `get_file` # a special problem: when multiple files are in one zip file extract = embedding_type_obj.get('extract', True) file_path = get_file( embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',)) if 'file_in_zip' in embedding_type_obj: zip_folder = file_path.split('.zip')[0] with ZipFile(file_path, 'r') as zf: zf.extractall(zip_folder) file_path = os.path.join( zip_folder, embedding_type_obj['file_in_zip']) else: if extract: if file_path.endswith('.zip'): file_path = file_path.split('.zip')[0] # if file_path.endswith('.gz'): # file_path = file_path.split('.gz')[0] else: file_path = embedding_path embeddings_index = _build_embeddings_index(file_path, embedding_dims) if cache: _EMBEDDINGS_CACHE[embedding_type] = embeddings_index return embeddings_index
<SYSTEM_TASK:> Creates `folds` number of indices that has roughly balanced multi-label distribution. <END_TASK> <USER_TASK:> Description: def equal_distribution_folds(y, folds=2): """Creates `folds` number of indices that has roughly balanced multi-label distribution. Args: y: The multi-label outputs. folds: The number of folds to create. Returns: `folds` number of indices that have roughly equal multi-label distributions. """
n, classes = y.shape # Compute sample distribution over classes dist = y.sum(axis=0).astype('float') dist /= dist.sum() index_list = [] fold_dist = np.zeros((folds, classes), dtype='float') for _ in range(folds): index_list.append([]) for i in range(n): if i < folds: target_fold = i else: normed_folds = fold_dist.T / fold_dist.sum(axis=1) how_off = normed_folds.T - dist target_fold = np.argmin( np.dot((y[i] - .5).reshape(1, -1), how_off.T)) fold_dist[target_fold] += y[i] index_list[target_fold].append(i) logger.debug("Fold distributions:") logger.debug(fold_dist) return index_list
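A hedged usage sketch, assuming `equal_distribution_folds` (and its module-level logger) is importable; the tiny multi-label matrix below is made up:

```python
import numpy as np

# Four samples, two labels; each label appears twice.
y = np.array([
    [1, 0],
    [0, 1],
    [1, 0],
    [0, 1],
])
folds = equal_distribution_folds(y, folds=2)
print(folds)  # two index lists, each covering roughly half of every label
```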
<SYSTEM_TASK:> Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by <END_TASK> <USER_TASK:> Description: def build_model(self, token_encoder_model, sentence_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by `sentence_encoder_model`. Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model will be applied across all sentences to create a sentence encoding. sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by `token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """
if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError("`token_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not isinstance(sentence_encoder_model, SequenceEncoderBase): raise ValueError("`sentence_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None: raise ValueError("Sentence encoder model '{}' requires padding. " "You need to provide `max_sents`") if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights( self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) word_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(word_input) word_encoding = token_encoder_model(x) token_encoder_model = Model( word_input, word_encoding, name='word_encoder') doc_input = Input( shape=(self.max_sents, self.max_tokens), dtype='int32') sent_encoding = TimeDistributed(token_encoder_model)(doc_input) x = sentence_encoder_model(sent_encoding) x = Dense(self.num_classes, activation=output_activation)(x) return Model(doc_input, x)
<SYSTEM_TASK:> Splits data into a training, validation, and test set. <END_TASK> <USER_TASK:> Description: def split_data(X, y, ratio=(0.8, 0.1, 0.1)): """Splits data into a training, validation, and test set. Args: X: text data y: data labels ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1) Returns: split data: X_train, X_val, X_test, y_train, y_val, y_test """
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
    X, y, train_size=ratio[0])
# The second split operates on the remainder only, so rescale the validation
# share relative to what is left over.
val_share = ratio[1] / (ratio[1] + ratio[2])
X_val, X_test, y_val, y_test = train_test_split(
    X_rest, y_rest, train_size=val_share)
return X_train, X_val, X_test, y_train, y_val, y_test
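A hedged usage sketch, assuming `split_data` above and scikit-learn are importable; the hundred dummy documents and labels are made up:

```python
X = ['doc {}'.format(i) for i in range(100)]
y = list(range(100))

X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
print(len(X_train), len(X_val), len(X_test))  # roughly 80 / 10 / 10
```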
<SYSTEM_TASK:> Setup data while splitting into a training, validation, and test set. <END_TASK> <USER_TASK:> Description: def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs): """Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data """
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y) # only build vocabulary on training data tokenizer.build_vocab(X_train) process_save(X_train, y_train, tokenizer, path.join( proc_data_dir, 'train.bin'), train=True, **kwargs) process_save(X_val, y_val, tokenizer, path.join( proc_data_dir, 'val.bin'), **kwargs) process_save(X_test, y_test, tokenizer, path.join( proc_data_dir, 'test.bin'), **kwargs)
<SYSTEM_TASK:> Builds a model using the given `text_model` <END_TASK> <USER_TASK:> Description: def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model using the given `text_model` Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """
if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError("`token_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not token_encoder_model.allows_dynamic_length() and self.max_tokens is None: raise ValueError("The provided `token_encoder_model` does not allow variable length mini-batches. " "You need to provide `max_tokens`") if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights( self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) sequence_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(sequence_input) x = token_encoder_model(x) x = Dense(self.num_classes, activation=output_activation)(x) return Model(sequence_input, x)
<SYSTEM_TASK:> Computes softmax along a specified dim. Keras currently lacks this feature. <END_TASK> <USER_TASK:> Description: def _softmax(x, dim): """Computes softmax along a specified dim. Keras currently lacks this feature. """
if K.backend() == 'tensorflow':
    import tensorflow as tf
    return tf.nn.softmax(x, dim)
elif K.backend() == 'cntk':
    import cntk
    return cntk.softmax(x, dim)
elif K.backend() == 'theano':
    # Theano cannot softmax along an arbitrary dim.
    # So, we will shuffle `dim` to -1 and un-shuffle after softmax.
    perm = np.arange(K.ndim(x))
    perm[dim], perm[-1] = perm[-1], perm[dim]
    x_perm = K.permute_dimensions(x, perm)
    output = K.softmax(x_perm)

    # Permute back
    perm[dim], perm[-1] = perm[-1], perm[dim]
    output = K.permute_dimensions(output, perm)
    return output
else:
    raise ValueError("Backend '{}' not supported".format(K.backend()))
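As a backend-free reference for what "softmax along a dim" means, here is a small numpy sketch (not part of the Keras helper above):

```python
import numpy as np

def np_softmax(x, axis):
    # Subtract the max for numerical stability before exponentiating.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
print(np_softmax(x, axis=-1).sum(axis=-1))  # each row sums to 1.0
```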